path: root/arch/m68k/kernel
author     Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 15:20:36 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>    2005-04-16 15:20:36 -0700
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/m68k/kernel
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'arch/m68k/kernel')
-rw-r--r--  arch/m68k/kernel/Makefile             18
-rw-r--r--  arch/m68k/kernel/asm-offsets.c       109
-rw-r--r--  arch/m68k/kernel/bios32.c            515
-rw-r--r--  arch/m68k/kernel/entry.S             712
-rw-r--r--  arch/m68k/kernel/head.S             3940
-rw-r--r--  arch/m68k/kernel/ints.c              281
-rw-r--r--  arch/m68k/kernel/m68k_ksyms.c         88
-rw-r--r--  arch/m68k/kernel/module.c            128
-rw-r--r--  arch/m68k/kernel/process.c           405
-rw-r--r--  arch/m68k/kernel/ptrace.c            393
-rw-r--r--  arch/m68k/kernel/semaphore.c         133
-rw-r--r--  arch/m68k/kernel/setup.c             545
-rw-r--r--  arch/m68k/kernel/signal.c           1025
-rw-r--r--  arch/m68k/kernel/sun3-head.S         104
-rw-r--r--  arch/m68k/kernel/sys_m68k.c          671
-rw-r--r--  arch/m68k/kernel/time.c              187
-rw-r--r--  arch/m68k/kernel/traps.c            1227
-rw-r--r--  arch/m68k/kernel/vmlinux-std.lds      95
-rw-r--r--  arch/m68k/kernel/vmlinux-sun3.lds     95
-rw-r--r--  arch/m68k/kernel/vmlinux.lds.S        11
20 files changed, 10682 insertions, 0 deletions
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
new file mode 100644
index 00000000000..458925c471a
--- /dev/null
+++ b/arch/m68k/kernel/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for the linux kernel.
+#
+
+ifndef CONFIG_SUN3
+ extra-y := head.o
+else
+ extra-y := sun3-head.o
+endif
+extra-y += vmlinux.lds
+
+obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o \
+ sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o
+
+obj-$(CONFIG_PCI) += bios32.o
+obj-$(CONFIG_MODULES) += module.o
+
+EXTRA_AFLAGS := -traditional
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
new file mode 100644
index 00000000000..cee3317b866
--- /dev/null
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -0,0 +1,109 @@
+/*
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ */
+
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/amigahw.h>
+#include <linux/font.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+int main(void)
+{
+ /* offsets into the task struct */
+ DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+ DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+ DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+ DEFINE(TASK_WORK, offsetof(struct task_struct, thread.work));
+ DEFINE(TASK_NEEDRESCHED, offsetof(struct task_struct, thread.work.need_resched));
+ DEFINE(TASK_SYSCALL_TRACE, offsetof(struct task_struct, thread.work.syscall_trace));
+ DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, thread.work.sigpending));
+ DEFINE(TASK_NOTIFY_RESUME, offsetof(struct task_struct, thread.work.notify_resume));
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+ DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+
+ /* offsets into the thread struct */
+ DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+ DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
+ DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
+ DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
+ DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
+ DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
+ DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
+ DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
+ DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
+
+ /* offsets into the pt_regs */
+ DEFINE(PT_D0, offsetof(struct pt_regs, d0));
+ DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0));
+ DEFINE(PT_D1, offsetof(struct pt_regs, d1));
+ DEFINE(PT_D2, offsetof(struct pt_regs, d2));
+ DEFINE(PT_D3, offsetof(struct pt_regs, d3));
+ DEFINE(PT_D4, offsetof(struct pt_regs, d4));
+ DEFINE(PT_D5, offsetof(struct pt_regs, d5));
+ DEFINE(PT_A0, offsetof(struct pt_regs, a0));
+ DEFINE(PT_A1, offsetof(struct pt_regs, a1));
+ DEFINE(PT_A2, offsetof(struct pt_regs, a2));
+ DEFINE(PT_PC, offsetof(struct pt_regs, pc));
+ DEFINE(PT_SR, offsetof(struct pt_regs, sr));
+ /* bitfields are a bit difficult */
+ DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4);
+
+ /* offsets into the irq_handler struct */
+ DEFINE(IRQ_HANDLER, offsetof(struct irq_node, handler));
+ DEFINE(IRQ_DEVID, offsetof(struct irq_node, dev_id));
+ DEFINE(IRQ_NEXT, offsetof(struct irq_node, next));
+
+ /* offsets into the kernel_stat struct */
+ DEFINE(STAT_IRQ, offsetof(struct kernel_stat, irqs));
+
+ /* offsets into the irq_cpustat_t struct */
+ DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
+
+ /* offsets into the bi_record struct */
+ DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
+ DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
+ DEFINE(BIR_DATA, offsetof(struct bi_record, data));
+
+ /* offsets into font_desc (drivers/video/console/font.h) */
+ DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
+ DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
+ DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
+ DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height));
+ DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data));
+ DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
+
+ /* signal defines */
+ DEFINE(SIGSEGV, SIGSEGV);
+ DEFINE(SEGV_MAPERR, SEGV_MAPERR);
+ DEFINE(SIGTRAP, SIGTRAP);
+ DEFINE(TRAP_TRACE, TRAP_TRACE);
+
+ /* offsets into the custom struct */
+ DEFINE(CUSTOMBASE, &custom);
+ DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
+ DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
+ DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
+ DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq));
+ DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr));
+ DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat));
+ DEFINE(C_SERPER, offsetof(struct CUSTOM, serper));
+ DEFINE(CIAABASE, &ciaa);
+ DEFINE(CIABBASE, &ciab);
+ DEFINE(C_PRA, offsetof(struct CIA, pra));
+ DEFINE(ZTWOBASE, zTwoBase);
+
+ return 0;
+}
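A quick illustration of the DEFINE() trick used throughout this file (the offset value and the exact operand syntax below are made up for the example; the extraction step is assumed to happen in the build system and is not part of this patch). A line such as

    DEFINE(TASK_MM, offsetof(struct task_struct, mm));

emits, when asm-offsets.c is compiled to assembler, a marker line along the lines of

    ->TASK_MM #92 offsetof(struct task_struct, mm)

which is then turned into an ordinary header entry such as

    #define TASK_MM 92

so that entry.S and head.S can pick the offsets up via <asm/offsets.h>.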
diff --git a/arch/m68k/kernel/bios32.c b/arch/m68k/kernel/bios32.c
new file mode 100644
index 00000000000..a901685eb6a
--- /dev/null
+++ b/arch/m68k/kernel/bios32.c
@@ -0,0 +1,515 @@
+/*
+ * bios32.c - PCI BIOS functions for m68k systems.
+ *
+ * Written by Wout Klaren.
+ *
+ * Based on the DEC Alpha bios32.c by Dave Rusling and David Mosberger.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#if 0
+# define DBG_DEVS(args) printk args
+#else
+# define DBG_DEVS(args)
+#endif
+
+#ifdef CONFIG_PCI
+
+/*
+ * PCI support for Linux/m68k. Currently only the Hades is supported.
+ *
+ * The support for PCI bridges in the DEC Alpha version has
+ * been removed in this version.
+ */
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+
+#include <asm/io.h>
+#include <asm/pci.h>
+#include <asm/uaccess.h>
+
+#define KB 1024
+#define MB (1024*KB)
+#define GB (1024*MB)
+
+#define MAJOR_REV 0
+#define MINOR_REV 5
+
+/*
+ * Align VAL to ALIGN, which must be a power of two.
+ */
+
+#define ALIGN(val,align) (((val) + ((align) - 1)) & ~((align) - 1))
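A worked example of the rounding ALIGN() performs, using illustrative values rather than anything from this file: aligning to a 4 KB boundary,

    ALIGN(0x4234, 0x1000)
        = (0x4234 + 0x0fff) & ~0x0fff
        = 0x5233 & 0xfffff000
        = 0x5000

while a value that is already a multiple of the alignment (say 0x5000) is returned unchanged.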
+
+/*
+ * Offsets relative to the I/O and memory base addresses from where resources
+ * are allocated.
+ */
+
+#define IO_ALLOC_OFFSET 0x00004000
+#define MEM_ALLOC_OFFSET 0x04000000
+
+/*
+ * Declarations of hardware specific initialisation functions.
+ */
+
+extern struct pci_bus_info *init_hades_pci(void);
+
+/*
+ * Bus info structure of the PCI bus. A pointer to this structure is
+ * put in the sysdata member of the pci_bus structure.
+ */
+
+static struct pci_bus_info *bus_info;
+
+static int pci_modify = 1; /* If set, lay out the PCI bus ourselves. */
+static int skip_vga; /* If set, do not modify base addresses
+ of vga cards. */
+static int disable_pci_burst; /* If set, do not allow PCI bursts. */
+
+static unsigned int io_base;
+static unsigned int mem_base;
+
+/*
+ * static void disable_dev(struct pci_dev *dev)
+ *
+ * Disable PCI device DEV so that it does not respond to I/O or memory
+ * accesses.
+ *
+ * Parameters:
+ *
+ * dev - device to disable.
+ */
+
+static void __init disable_dev(struct pci_dev *dev)
+{
+ unsigned short cmd;
+
+ if (((dev->class >> 8 == PCI_CLASS_NOT_DEFINED_VGA) ||
+ (dev->class >> 8 == PCI_CLASS_DISPLAY_VGA) ||
+ (dev->class >> 8 == PCI_CLASS_DISPLAY_XGA)) && skip_vga)
+ return;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+
+ cmd &= (~PCI_COMMAND_IO & ~PCI_COMMAND_MEMORY & ~PCI_COMMAND_MASTER);
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+}
+
+/*
+ * static void layout_dev(struct pci_dev *dev)
+ *
+ * Layout memory and I/O for a device.
+ *
+ * Parameters:
+ *
+ * device - device to layout memory and I/O for.
+ */
+
+static void __init layout_dev(struct pci_dev *dev)
+{
+ unsigned short cmd;
+ unsigned int base, mask, size, reg;
+ unsigned int alignto;
+ int i;
+
+ /*
+ * Skip video cards if requested.
+ */
+
+ if (((dev->class >> 8 == PCI_CLASS_NOT_DEFINED_VGA) ||
+ (dev->class >> 8 == PCI_CLASS_DISPLAY_VGA) ||
+ (dev->class >> 8 == PCI_CLASS_DISPLAY_XGA)) && skip_vga)
+ return;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+
+ for (reg = PCI_BASE_ADDRESS_0, i = 0; reg <= PCI_BASE_ADDRESS_5; reg += 4, i++)
+ {
+ /*
+ * Figure out how much space and of what type this
+ * device wants.
+ */
+
+ pci_write_config_dword(dev, reg, 0xffffffff);
+ pci_read_config_dword(dev, reg, &base);
+
+ if (!base)
+ {
+ /* this base-address register is unused */
+ dev->resource[i].start = 0;
+ dev->resource[i].end = 0;
+ dev->resource[i].flags = 0;
+ continue;
+ }
+
+ /*
+ * We've read the base address register back after
+ * writing all ones and so now we must decode it.
+ */
+
+ if (base & PCI_BASE_ADDRESS_SPACE_IO)
+ {
+ /*
+ * I/O space base address register.
+ */
+
+ cmd |= PCI_COMMAND_IO;
+
+ base &= PCI_BASE_ADDRESS_IO_MASK;
+ mask = (~base << 1) | 0x1;
+ size = (mask & base) & 0xffffffff;
+
+ /*
+ * Align to multiple of size of minimum base.
+ */
+
+ alignto = max_t(unsigned int, 0x040, size);
+ base = ALIGN(io_base, alignto);
+ io_base = base + size;
+ pci_write_config_dword(dev, reg, base | PCI_BASE_ADDRESS_SPACE_IO);
+
+ dev->resource[i].start = base;
+ dev->resource[i].end = dev->resource[i].start + size - 1;
+ dev->resource[i].flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
+
+ DBG_DEVS(("layout_dev: IO address: %lX\n", base));
+ }
+ else
+ {
+ unsigned int type;
+
+ /*
+ * Memory space base address register.
+ */
+
+ cmd |= PCI_COMMAND_MEMORY;
+ type = base & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+ base &= PCI_BASE_ADDRESS_MEM_MASK;
+ mask = (~base << 1) | 0x1;
+ size = (mask & base) & 0xffffffff;
+ switch (type)
+ {
+ case PCI_BASE_ADDRESS_MEM_TYPE_32:
+ case PCI_BASE_ADDRESS_MEM_TYPE_64:
+ break;
+
+ case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+ printk("bios32 WARNING: slot %d, function %d "
+ "requests memory below 1MB---don't "
+ "know how to do that.\n",
+ PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+ continue;
+ }
+
+ /*
+ * Align to multiple of size of minimum base.
+ */
+
+ alignto = max_t(unsigned int, 0x1000, size);
+ base = ALIGN(mem_base, alignto);
+ mem_base = base + size;
+ pci_write_config_dword(dev, reg, base);
+
+ dev->resource[i].start = base;
+ dev->resource[i].end = dev->resource[i].start + size - 1;
+ dev->resource[i].flags = IORESOURCE_MEM;
+
+ if (type == PCI_BASE_ADDRESS_MEM_TYPE_64)
+ {
+ /*
+ * 64-bit address, set the highest 32 bits
+ * to zero.
+ */
+
+ reg += 4;
+ pci_write_config_dword(dev, reg, 0);
+
+ i++;
+ dev->resource[i].start = 0;
+ dev->resource[i].end = 0;
+ dev->resource[i].flags = 0;
+ }
+ }
+ }
+
+ /*
+ * Enable device:
+ */
+
+ if (dev->class >> 8 == PCI_CLASS_NOT_DEFINED ||
+ dev->class >> 8 == PCI_CLASS_NOT_DEFINED_VGA ||
+ dev->class >> 8 == PCI_CLASS_DISPLAY_VGA ||
+ dev->class >> 8 == PCI_CLASS_DISPLAY_XGA)
+ {
+ /*
+ * All of these (may) have I/O scattered all around
+ * and may not use i/o-base address registers at all.
+ * So we just have to always enable I/O to these
+ * devices.
+ */
+ cmd |= PCI_COMMAND_IO;
+ }
+
+ pci_write_config_word(dev, PCI_COMMAND, cmd | PCI_COMMAND_MASTER);
+
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, (disable_pci_burst) ? 0 : 32);
+
+ if (bus_info != NULL)
+ bus_info->conf_device(dev); /* Machine dependent configuration. */
+
+ DBG_DEVS(("layout_dev: bus %d slot 0x%x VID 0x%x DID 0x%x class 0x%x\n",
+ dev->bus->number, PCI_SLOT(dev->devfn), dev->vendor, dev->device, dev->class));
+}
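The loop above uses the standard PCI base-address-register sizing probe: write all ones to the BAR, read it back, and derive the region size from the bits that stayed zero. A worked example with made-up numbers, for a device implementing a 256-byte I/O BAR:

    pci_write_config_dword(dev, reg, 0xffffffff);
    pci_read_config_dword(dev, reg, &base);    /* suppose base == 0xffffff01 */

    base &= PCI_BASE_ADDRESS_IO_MASK;          /* 0xffffff00                 */
    mask  = (~base << 1) | 0x1;                /* 0x000001ff                 */
    size  = mask & base;                       /* 0x00000100 -> 256 bytes    */

The region is then placed at the next multiple of max(0x40, size) at or above io_base and written back with the I/O space bit set.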
+
+/*
+ * static void layout_bus(struct pci_bus *bus)
+ *
+ * Layout memory and I/O for all devices on the given bus.
+ *
+ * Parameters:
+ *
+ * bus - bus.
+ */
+
+static void __init layout_bus(struct pci_bus *bus)
+{
+ unsigned int bio, bmem;
+ struct pci_dev *dev;
+
+ DBG_DEVS(("layout_bus: starting bus %d\n", bus->number));
+
+ if (!bus->devices && !bus->children)
+ return;
+
+ /*
+ * Align the current bases on appropriate boundaries (4K for
+ * IO and 1MB for memory).
+ */
+
+ bio = io_base = ALIGN(io_base, 4*KB);
+ bmem = mem_base = ALIGN(mem_base, 1*MB);
+
+ /*
+	 * PCI devices might have been set up by a PCI BIOS emulation
+ * running under TOS. In these cases there is a
+ * window during which two devices may have an overlapping
+ * address range. To avoid this causing trouble, we first
+ * turn off the I/O and memory address decoders for all PCI
+ * devices. They'll be re-enabled only once all address
+ * decoders are programmed consistently.
+ */
+
+ DBG_DEVS(("layout_bus: disable_dev for bus %d\n", bus->number));
+
+ for (dev = bus->devices; dev; dev = dev->sibling)
+ {
+ if ((dev->class >> 16 != PCI_BASE_CLASS_BRIDGE) ||
+ (dev->class >> 8 == PCI_CLASS_BRIDGE_PCMCIA))
+ disable_dev(dev);
+ }
+
+ /*
+ * Allocate space to each device:
+ */
+
+ DBG_DEVS(("layout_bus: starting bus %d devices\n", bus->number));
+
+ for (dev = bus->devices; dev; dev = dev->sibling)
+ {
+ if ((dev->class >> 16 != PCI_BASE_CLASS_BRIDGE) ||
+ (dev->class >> 8 == PCI_CLASS_BRIDGE_PCMCIA))
+ layout_dev(dev);
+ }
+
+ DBG_DEVS(("layout_bus: bus %d finished\n", bus->number));
+}
+
+/*
+ * static void pcibios_fixup(void)
+ *
+ * Layout memory and I/O of all devices on the PCI bus if 'pci_modify' is
+ * true. This might be necessary because not every m68k machine with a PCI
+ * bus has a PCI BIOS. This function should be called right after
+ * pci_scan_bus() in pcibios_init().
+ */
+
+static void __init pcibios_fixup(void)
+{
+ if (pci_modify)
+ {
+ /*
+ * Set base addresses for allocation of I/O and memory space.
+ */
+
+ io_base = bus_info->io_space.start + IO_ALLOC_OFFSET;
+ mem_base = bus_info->mem_space.start + MEM_ALLOC_OFFSET;
+
+ /*
+ * Scan the tree, allocating PCI memory and I/O space.
+ */
+
+ layout_bus(pci_bus_b(pci_root.next));
+ }
+
+ /*
+ * Fix interrupt assignments, etc.
+ */
+
+ bus_info->fixup(pci_modify);
+}
+
+/*
+ * static void pcibios_claim_resources(struct pci_bus *bus)
+ *
+ * Claim all resources that are assigned to devices on the given bus.
+ *
+ * Parameters:
+ *
+ * bus - bus.
+ */
+
+static void __init pcibios_claim_resources(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+ int i;
+
+ while (bus)
+ {
+ for (dev = bus->devices; (dev != NULL); dev = dev->sibling)
+ {
+ for (i = 0; i < PCI_NUM_RESOURCES; i++)
+ {
+ struct resource *r = &dev->resource[i];
+ struct resource *pr;
+ struct pci_bus_info *bus_info = (struct pci_bus_info *) dev->sysdata;
+
+ if ((r->start == 0) || (r->parent != NULL))
+ continue;
+#if 1
+ if (r->flags & IORESOURCE_IO)
+ pr = &bus_info->io_space;
+ else
+ pr = &bus_info->mem_space;
+#else
+ if (r->flags & IORESOURCE_IO)
+ pr = &ioport_resource;
+ else
+ pr = &iomem_resource;
+#endif
+ if (request_resource(pr, r) < 0)
+ {
+ printk(KERN_ERR "PCI: Address space collision on region %d of device %s\n", i, dev->name);
+ }
+ }
+ }
+
+ if (bus->children)
+ pcibios_claim_resources(bus->children);
+
+ bus = bus->next;
+ }
+}
+
+/*
+ * int pcibios_assign_resource(struct pci_dev *dev, int i)
+ *
+ * Assign a new address to a PCI resource.
+ *
+ * Parameters:
+ *
+ * dev - device.
+ * i - resource.
+ *
+ * Result: 0 if successful.
+ */
+
+int __init pcibios_assign_resource(struct pci_dev *dev, int i)
+{
+ struct resource *r = &dev->resource[i];
+ struct resource *pr = pci_find_parent_resource(dev, r);
+ unsigned long size = r->end + 1;
+
+ if (!pr)
+ return -EINVAL;
+
+ if (r->flags & IORESOURCE_IO)
+ {
+ if (size > 0x100)
+ return -EFBIG;
+
+ if (allocate_resource(pr, r, size, bus_info->io_space.start +
+ IO_ALLOC_OFFSET, bus_info->io_space.end, 1024))
+ return -EBUSY;
+ }
+ else
+ {
+ if (allocate_resource(pr, r, size, bus_info->mem_space.start +
+ MEM_ALLOC_OFFSET, bus_info->mem_space.end, size))
+ return -EBUSY;
+ }
+
+ if (i < 6)
+ pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, r->start);
+
+ return 0;
+}
+
+void __init pcibios_fixup_bus(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+ void *sysdata;
+
+ sysdata = (bus->parent) ? bus->parent->sysdata : bus->sysdata;
+
+ for (dev = bus->devices; (dev != NULL); dev = dev->sibling)
+ dev->sysdata = sysdata;
+}
+
+void __init pcibios_init(void)
+{
+ printk("Linux/m68k PCI BIOS32 revision %x.%02x\n", MAJOR_REV, MINOR_REV);
+
+ bus_info = NULL;
+#ifdef CONFIG_HADES
+ if (MACH_IS_HADES)
+ bus_info = init_hades_pci();
+#endif
+ if (bus_info != NULL)
+ {
+ printk("PCI: Probing PCI hardware\n");
+ pci_scan_bus(0, bus_info->m68k_pci_ops, bus_info);
+ pcibios_fixup();
+ pcibios_claim_resources(pci_root);
+ }
+ else
+ printk("PCI: No PCI bus detected\n");
+}
+
+char * __init pcibios_setup(char *str)
+{
+ if (!strcmp(str, "nomodify"))
+ {
+ pci_modify = 0;
+ return NULL;
+ }
+ else if (!strcmp(str, "skipvga"))
+ {
+ skip_vga = 1;
+ return NULL;
+ }
+ else if (!strcmp(str, "noburst"))
+ {
+ disable_pci_burst = 1;
+ return NULL;
+ }
+
+ return str;
+}
+#endif /* CONFIG_PCI */
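pcibios_setup() above consumes the option strings it recognises ("nomodify", "skipvga", "noburst") and returns NULL for them, handing anything else back to the caller unchanged. A minimal sketch of how a generic caller could feed it comma-separated tokens; the helper name and the assumption that the tokens arrive via a "pci=" boot argument are illustrative and not shown by this patch:

    #include <linux/kernel.h>
    #include <linux/string.h>

    /* hypothetical helper, shown only to illustrate the return convention */
    static void parse_pci_options(char *str)
    {
            char *tok;

            while ((tok = strsep(&str, ",")) != NULL) {
                    if (*tok && pcibios_setup(tok) != NULL)
                            printk(KERN_WARNING "PCI: unknown option '%s'\n", tok);
            }
    }

Booting with, say, pci=skipvga would then leave any VGA card's base addresses untouched while the rest of the bus is laid out.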
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
new file mode 100644
index 00000000000..e964015a31d
--- /dev/null
+++ b/arch/m68k/kernel/entry.S
@@ -0,0 +1,712 @@
+/* -*- mode: asm -*-
+ *
+ * linux/arch/m68k/kernel/entry.S
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file README.legal in the main directory of this archive
+ * for more details.
+ *
+ * Linux/m68k support by Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
+ *
+ */
+
+/*
+ * entry.S contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ */
+
+/*
+ * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
+ * all pointers that used to be 'current' are now entry
+ * number 0 in the 'current_set' list.
+ *
+ * 6/05/00 RZ: added writeback completion after return from sighandler
+ * for 68040
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/entry.h>
+#include <asm/errno.h>
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/traps.h>
+#include <asm/unistd.h>
+
+#include <asm/offsets.h>
+
+.globl system_call, buserr, trap
+.globl resume, ret_from_exception
+.globl ret_from_signal
+.globl inthandler, sys_call_table
+.globl sys_fork, sys_clone, sys_vfork
+.globl ret_from_interrupt, bad_interrupt
+
+.text
+ENTRY(buserr)
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ movel %sp,%sp@- | stack frame pointer argument
+ bsrl buserr_c
+ addql #4,%sp
+ jra ret_from_exception
+
+ENTRY(trap)
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ movel %sp,%sp@- | stack frame pointer argument
+ bsrl trap_c
+ addql #4,%sp
+ jra ret_from_exception
+
+ | After a fork we jump here directly from resume,
+ | so that %d1 contains the previous task
+ | schedule_tail now used regardless of CONFIG_SMP
+ENTRY(ret_from_fork)
+ movel %d1,%sp@-
+ jsr schedule_tail
+ addql #4,%sp
+ jra ret_from_exception
+
+badsys:
+ movel #-ENOSYS,%sp@(PT_D0)
+ jra ret_from_exception
+
+do_trace:
+ movel #-ENOSYS,%sp@(PT_D0) | needed for strace
+ subql #4,%sp
+ SAVE_SWITCH_STACK
+ jbsr syscall_trace
+ RESTORE_SWITCH_STACK
+ addql #4,%sp
+ movel %sp@(PT_ORIG_D0),%d1
+ movel #-ENOSYS,%d0
+ cmpl #NR_syscalls,%d1
+ jcc 1f
+ jbsr @(sys_call_table,%d1:l:4)@(0)
+1: movel %d0,%sp@(PT_D0) | save the return value
+ subql #4,%sp | dummy return address
+ SAVE_SWITCH_STACK
+ jbsr syscall_trace
+
+ret_from_signal:
+ RESTORE_SWITCH_STACK
+ addql #4,%sp
+/* on 68040 complete pending writebacks if any */
+#ifdef CONFIG_M68040
+ bfextu %sp@(PT_VECTOR){#0,#4},%d0
+ subql #7,%d0 | bus error frame ?
+ jbne 1f
+ movel %sp,%sp@-
+ jbsr berr_040cleanup
+ addql #4,%sp
+1:
+#endif
+ jra ret_from_exception
+
+ENTRY(system_call)
+ SAVE_ALL_SYS
+
+ GET_CURRENT(%d1)
+ | save top of frame
+ movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
+
+ tstb %curptr@(TASK_SYSCALL_TRACE)
+ jne do_trace
+ cmpl #NR_syscalls,%d0
+ jcc badsys
+ jbsr @(sys_call_table,%d0:l:4)@(0)
+ movel %d0,%sp@(PT_D0) | save the return value
+
+ |oriw #0x0700,%sr
+ movel %curptr@(TASK_WORK),%d0
+ jne syscall_exit_work
+1: RESTORE_ALL
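The dispatch instruction above, jbsr @(sys_call_table,%d0:l:4)@(0), uses the 68020+ memory-indirect addressing mode: fetch the long word at sys_call_table + d0*4, then call through that pointer. In rough C terms (a sketch only; passing the saved user registers as the syscall arguments is omitted):

    long dispatch_syscall(unsigned long nr)
    {
            extern long (*sys_call_table[])(void);   /* the table defined below */

            if (nr >= NR_syscalls)          /* cmpl #NR_syscalls,%d0 ; jcc badsys  */
                    return -ENOSYS;
            return sys_call_table[nr]();    /* jbsr @(sys_call_table,%d0:l:4)@(0)  */
    }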
+
+syscall_exit_work:
+ btst #5,%sp@(PT_SR) | check if returning to kernel
+ bnes 1b | if so, skip resched, signals
+ tstw %d0
+ jeq do_signal_return
+ tstb %d0
+ jne do_delayed_trace
+
+ pea resume_userspace
+ jmp schedule
+
+ret_from_exception:
+ btst #5,%sp@(PT_SR) | check if returning to kernel
+ bnes 1f | if so, skip resched, signals
+ | only allow interrupts when we are really the last one on the
+ | kernel stack, otherwise stack overflow can occur during
+ | heavy interrupt load
+ andw #ALLOWINT,%sr
+
+resume_userspace:
+ movel %curptr@(TASK_WORK),%d0
+ lsrl #8,%d0
+ jne exit_work
+1: RESTORE_ALL
+
+exit_work:
+ | save top of frame
+ movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
+ tstb %d0
+ jeq do_signal_return
+
+ pea resume_userspace
+ jmp schedule
+
+do_signal_return:
+ |andw #ALLOWINT,%sr
+ subql #4,%sp | dummy return address
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ clrl %sp@-
+ bsrl do_signal
+ addql #8,%sp
+ RESTORE_SWITCH_STACK
+ addql #4,%sp
+ jbra resume_userspace
+
+do_delayed_trace:
+ bclr #7,%sp@(PT_SR) | clear trace bit in SR
+ pea 1 | send SIGTRAP
+ movel %curptr,%sp@-
+ pea LSIGTRAP
+ jbsr send_sig
+ addql #8,%sp
+ addql #4,%sp
+ jbra resume_userspace
+
+
+#if 0
+#ifdef CONFIG_AMIGA
+ami_inthandler:
+ addql #1,irq_stat+CPUSTAT_LOCAL_IRQ_COUNT
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+
+ bfextu %sp@(PT_VECTOR){#4,#12},%d0
+ movel %d0,%a0
+ addql #1,%a0@(kstat+STAT_IRQ-VECOFF(VEC_SPUR))
+ movel %a0@(autoirq_list-VECOFF(VEC_SPUR)),%a0
+
+| amiga vector int handler gets the req mask instead of irq vector
+ lea CUSTOMBASE,%a1
+ movew %a1@(C_INTREQR),%d0
+ andw %a1@(C_INTENAR),%d0
+
+| prepare stack (push frame pointer, dev_id & req mask)
+ pea %sp@
+ movel %a0@(IRQ_DEVID),%sp@-
+ movel %d0,%sp@-
+ pea %pc@(ret_from_interrupt:w)
+ jbra @(IRQ_HANDLER,%a0)@(0)
+
+ENTRY(nmi_handler)
+ rte
+#endif
+#endif
+
+/*
+** This is the main interrupt handler, responsible for calling process_int()
+*/
+inthandler:
+ SAVE_ALL_INT
+ GET_CURRENT(%d0)
+ addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+2)
+ | put exception # in d0
+ bfextu %sp@(PT_VECTOR){#4,#10},%d0
+
+ movel %sp,%sp@-
+ movel %d0,%sp@- | put vector # on stack
+#if defined(MACH_Q40_ONLY) && defined(CONFIG_BLK_DEV_FD)
+ btstb #4,0xff000000 | Q40 floppy needs very special treatment ...
+ jbeq 1f
+ btstb #3,0xff000004
+ jbeq 1f
+ jbsr floppy_hardint
+ jbra 3f
+1:
+#endif
+ jbsr process_int | process the IRQ
+3: addql #8,%sp | pop parameters off stack
+
+ret_from_interrupt:
+ subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+2)
+ jeq 1f
+2:
+ RESTORE_ALL
+1:
+ moveq #(~ALLOWINT>>8)&0xff,%d0
+ andb %sp@(PT_SR),%d0
+ jne 2b
+
+ /* check if we need to do software interrupts */
+ tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
+ jeq ret_from_exception
+ pea ret_from_exception
+ jra do_softirq
+
+
+/* Handler for uninitialized and spurious interrupts */
+
+bad_interrupt:
+ addql #1,num_spurious
+ rte
+
+ENTRY(sys_fork)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr m68k_fork
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_clone)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr m68k_clone
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_vfork)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr m68k_vfork
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_sigsuspend)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr do_sigsuspend
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_rt_sigsuspend)
+ SAVE_SWITCH_STACK
+ pea %sp@(SWITCH_STACK_SIZE)
+ jbsr do_rt_sigsuspend
+ addql #4,%sp
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_sigreturn)
+ SAVE_SWITCH_STACK
+ jbsr do_sigreturn
+ RESTORE_SWITCH_STACK
+ rts
+
+ENTRY(sys_rt_sigreturn)
+ SAVE_SWITCH_STACK
+ jbsr do_rt_sigreturn
+ RESTORE_SWITCH_STACK
+ rts
+
+resume:
+ /*
+ * Beware - when entering resume, prev (the current task) is
+	 * in a0, next (the new task) is in a1, so don't change these
+ * registers until their contents are no longer needed.
+ */
+
+ /* save sr */
+ movew %sr,%a0@(TASK_THREAD+THREAD_SR)
+
+ /* save fs (sfc,%dfc) (may be pointing to kernel memory) */
+ movec %sfc,%d0
+ movew %d0,%a0@(TASK_THREAD+THREAD_FS)
+
+ /* save usp */
+ /* it is better to use a movel here instead of a movew 8*) */
+ movec %usp,%d0
+ movel %d0,%a0@(TASK_THREAD+THREAD_USP)
+
+ /* save non-scratch registers on stack */
+ SAVE_SWITCH_STACK
+
+ /* save current kernel stack pointer */
+ movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
+
+ /* save floating point context */
+#ifndef CONFIG_M68KFPU_EMU_ONLY
+#ifdef CONFIG_M68KFPU_EMU
+ tstl m68k_fputype
+ jeq 3f
+#endif
+ fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
+
+#if defined(CONFIG_M68060)
+#if !defined(CPU_M68060_ONLY)
+ btst #3,m68k_cputype+3
+ beqs 1f
+#endif
+ /* The 060 FPU keeps status in bits 15-8 of the first longword */
+ tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
+ jeq 3f
+#if !defined(CPU_M68060_ONLY)
+ jra 2f
+#endif
+#endif /* CONFIG_M68060 */
+#if !defined(CPU_M68060_ONLY)
+1: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
+ jeq 3f
+#endif
+2: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
+ fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
+3:
+#endif /* CONFIG_M68KFPU_EMU_ONLY */
+ /* Return previous task in %d1 */
+ movel %curptr,%d1
+
+ /* switch to new task (a1 contains new task) */
+ movel %a1,%curptr
+
+ /* restore floating point context */
+#ifndef CONFIG_M68KFPU_EMU_ONLY
+#ifdef CONFIG_M68KFPU_EMU
+ tstl m68k_fputype
+ jeq 4f
+#endif
+#if defined(CONFIG_M68060)
+#if !defined(CPU_M68060_ONLY)
+ btst #3,m68k_cputype+3
+ beqs 1f
+#endif
+ /* The 060 FPU keeps status in bits 15-8 of the first longword */
+ tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
+ jeq 3f
+#if !defined(CPU_M68060_ONLY)
+ jra 2f
+#endif
+#endif /* CONFIG_M68060 */
+#if !defined(CPU_M68060_ONLY)
+1: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
+ jeq 3f
+#endif
+2: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
+ fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
+3: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
+4:
+#endif /* CONFIG_M68KFPU_EMU_ONLY */
+
+ /* restore the kernel stack pointer */
+ movel %a1@(TASK_THREAD+THREAD_KSP),%sp
+
+ /* restore non-scratch registers */
+ RESTORE_SWITCH_STACK
+
+ /* restore user stack pointer */
+ movel %a1@(TASK_THREAD+THREAD_USP),%a0
+ movel %a0,%usp
+
+ /* restore fs (sfc,%dfc) */
+ movew %a1@(TASK_THREAD+THREAD_FS),%a0
+ movec %a0,%sfc
+ movec %a0,%dfc
+
+ /* restore status register */
+ movew %a1@(TASK_THREAD+THREAD_SR),%sr
+
+ rts
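For orientation, a condensed view of what resume does (pseudo-code, illustration only; the FPU save/restore above is elided). With prev in %a0 and next in %a1:

    /*
     *   prev->thread.sr  = SR;     prev->thread.fs  = SFC;
     *   prev->thread.usp = USP;    prev->thread.ksp = SP;   (after SAVE_SWITCH_STACK)
     *   curptr = next;
     *   SP  = next->thread.ksp;    (then RESTORE_SWITCH_STACK)
     *   USP = next->thread.usp;    SFC = DFC = next->thread.fs;
     *   SR  = next->thread.sr;
     *   return;                    (the rts now runs on next's kernel stack)
     */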
+
+.data
+ALIGN
+sys_call_table:
+ .long sys_ni_syscall /* 0 - old "setup()" system call*/
+ .long sys_exit
+ .long sys_fork
+ .long sys_read
+ .long sys_write
+ .long sys_open /* 5 */
+ .long sys_close
+ .long sys_waitpid
+ .long sys_creat
+ .long sys_link
+ .long sys_unlink /* 10 */
+ .long sys_execve
+ .long sys_chdir
+ .long sys_time
+ .long sys_mknod
+ .long sys_chmod /* 15 */
+ .long sys_chown16
+ .long sys_ni_syscall /* old break syscall holder */
+ .long sys_stat
+ .long sys_lseek
+ .long sys_getpid /* 20 */
+ .long sys_mount
+ .long sys_oldumount
+ .long sys_setuid16
+ .long sys_getuid16
+ .long sys_stime /* 25 */
+ .long sys_ptrace
+ .long sys_alarm
+ .long sys_fstat
+ .long sys_pause
+ .long sys_utime /* 30 */
+ .long sys_ni_syscall /* old stty syscall holder */
+ .long sys_ni_syscall /* old gtty syscall holder */
+ .long sys_access
+ .long sys_nice
+ .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
+ .long sys_sync
+ .long sys_kill
+ .long sys_rename
+ .long sys_mkdir
+ .long sys_rmdir /* 40 */
+ .long sys_dup
+ .long sys_pipe
+ .long sys_times
+ .long sys_ni_syscall /* old prof syscall holder */
+ .long sys_brk /* 45 */
+ .long sys_setgid16
+ .long sys_getgid16
+ .long sys_signal
+ .long sys_geteuid16
+ .long sys_getegid16 /* 50 */
+ .long sys_acct
+ .long sys_umount /* recycled never used phys() */
+ .long sys_ni_syscall /* old lock syscall holder */
+ .long sys_ioctl
+ .long sys_fcntl /* 55 */
+ .long sys_ni_syscall /* old mpx syscall holder */
+ .long sys_setpgid
+ .long sys_ni_syscall /* old ulimit syscall holder */
+ .long sys_ni_syscall
+ .long sys_umask /* 60 */
+ .long sys_chroot
+ .long sys_ustat
+ .long sys_dup2
+ .long sys_getppid
+ .long sys_getpgrp /* 65 */
+ .long sys_setsid
+ .long sys_sigaction
+ .long sys_sgetmask
+ .long sys_ssetmask
+ .long sys_setreuid16 /* 70 */
+ .long sys_setregid16
+ .long sys_sigsuspend
+ .long sys_sigpending
+ .long sys_sethostname
+ .long sys_setrlimit /* 75 */
+ .long sys_old_getrlimit
+ .long sys_getrusage
+ .long sys_gettimeofday
+ .long sys_settimeofday
+ .long sys_getgroups16 /* 80 */
+ .long sys_setgroups16
+ .long old_select
+ .long sys_symlink
+ .long sys_lstat
+ .long sys_readlink /* 85 */
+ .long sys_uselib
+ .long sys_swapon
+ .long sys_reboot
+ .long old_readdir
+ .long old_mmap /* 90 */
+ .long sys_munmap
+ .long sys_truncate
+ .long sys_ftruncate
+ .long sys_fchmod
+ .long sys_fchown16 /* 95 */
+ .long sys_getpriority
+ .long sys_setpriority
+ .long sys_ni_syscall /* old profil syscall holder */
+ .long sys_statfs
+ .long sys_fstatfs /* 100 */
+ .long sys_ni_syscall /* ioperm for i386 */
+ .long sys_socketcall
+ .long sys_syslog
+ .long sys_setitimer
+ .long sys_getitimer /* 105 */
+ .long sys_newstat
+ .long sys_newlstat
+ .long sys_newfstat
+ .long sys_ni_syscall
+ .long sys_ni_syscall /* 110 */ /* iopl for i386 */
+ .long sys_vhangup
+ .long sys_ni_syscall /* obsolete idle() syscall */
+ .long sys_ni_syscall /* vm86old for i386 */
+ .long sys_wait4
+ .long sys_swapoff /* 115 */
+ .long sys_sysinfo
+ .long sys_ipc
+ .long sys_fsync
+ .long sys_sigreturn
+ .long sys_clone /* 120 */
+ .long sys_setdomainname
+ .long sys_newuname
+ .long sys_cacheflush /* modify_ldt for i386 */
+ .long sys_adjtimex
+ .long sys_mprotect /* 125 */
+ .long sys_sigprocmask
+ .long sys_ni_syscall /* old "create_module" */
+ .long sys_init_module
+ .long sys_delete_module
+ .long sys_ni_syscall /* 130 - old "get_kernel_syms" */
+ .long sys_quotactl
+ .long sys_getpgid
+ .long sys_fchdir
+ .long sys_bdflush
+ .long sys_sysfs /* 135 */
+ .long sys_personality
+ .long sys_ni_syscall /* for afs_syscall */
+ .long sys_setfsuid16
+ .long sys_setfsgid16
+ .long sys_llseek /* 140 */
+ .long sys_getdents
+ .long sys_select
+ .long sys_flock
+ .long sys_msync
+ .long sys_readv /* 145 */
+ .long sys_writev
+ .long sys_getsid
+ .long sys_fdatasync
+ .long sys_sysctl
+ .long sys_mlock /* 150 */
+ .long sys_munlock
+ .long sys_mlockall
+ .long sys_munlockall
+ .long sys_sched_setparam
+ .long sys_sched_getparam /* 155 */
+ .long sys_sched_setscheduler
+ .long sys_sched_getscheduler
+ .long sys_sched_yield
+ .long sys_sched_get_priority_max
+ .long sys_sched_get_priority_min /* 160 */
+ .long sys_sched_rr_get_interval
+ .long sys_nanosleep
+ .long sys_mremap
+ .long sys_setresuid16
+ .long sys_getresuid16 /* 165 */
+ .long sys_getpagesize
+ .long sys_ni_syscall /* old sys_query_module */
+ .long sys_poll
+ .long sys_nfsservctl
+ .long sys_setresgid16 /* 170 */
+ .long sys_getresgid16
+ .long sys_prctl
+ .long sys_rt_sigreturn
+ .long sys_rt_sigaction
+ .long sys_rt_sigprocmask /* 175 */
+ .long sys_rt_sigpending
+ .long sys_rt_sigtimedwait
+ .long sys_rt_sigqueueinfo
+ .long sys_rt_sigsuspend
+ .long sys_pread64 /* 180 */
+ .long sys_pwrite64
+ .long sys_lchown16;
+ .long sys_getcwd
+ .long sys_capget
+ .long sys_capset /* 185 */
+ .long sys_sigaltstack
+ .long sys_sendfile
+ .long sys_ni_syscall /* streams1 */
+ .long sys_ni_syscall /* streams2 */
+ .long sys_vfork /* 190 */
+ .long sys_getrlimit
+ .long sys_mmap2
+ .long sys_truncate64
+ .long sys_ftruncate64
+ .long sys_stat64 /* 195 */
+ .long sys_lstat64
+ .long sys_fstat64
+ .long sys_chown
+ .long sys_getuid
+ .long sys_getgid /* 200 */
+ .long sys_geteuid
+ .long sys_getegid
+ .long sys_setreuid
+ .long sys_setregid
+ .long sys_getgroups /* 205 */
+ .long sys_setgroups
+ .long sys_fchown
+ .long sys_setresuid
+ .long sys_getresuid
+ .long sys_setresgid /* 210 */
+ .long sys_getresgid
+ .long sys_lchown
+ .long sys_setuid
+ .long sys_setgid
+ .long sys_setfsuid /* 215 */
+ .long sys_setfsgid
+ .long sys_pivot_root
+ .long sys_ni_syscall
+ .long sys_ni_syscall
+ .long sys_getdents64 /* 220 */
+ .long sys_gettid
+ .long sys_tkill
+ .long sys_setxattr
+ .long sys_lsetxattr
+ .long sys_fsetxattr /* 225 */
+ .long sys_getxattr
+ .long sys_lgetxattr
+ .long sys_fgetxattr
+ .long sys_listxattr
+ .long sys_llistxattr /* 230 */
+ .long sys_flistxattr
+ .long sys_removexattr
+ .long sys_lremovexattr
+ .long sys_fremovexattr
+ .long sys_futex /* 235 */
+ .long sys_sendfile64
+ .long sys_mincore
+ .long sys_madvise
+ .long sys_fcntl64
+ .long sys_readahead /* 240 */
+ .long sys_io_setup
+ .long sys_io_destroy
+ .long sys_io_getevents
+ .long sys_io_submit
+ .long sys_io_cancel /* 245 */
+ .long sys_fadvise64
+ .long sys_exit_group
+ .long sys_lookup_dcookie
+ .long sys_epoll_create
+ .long sys_epoll_ctl /* 250 */
+ .long sys_epoll_wait
+ .long sys_remap_file_pages
+ .long sys_set_tid_address
+ .long sys_timer_create
+ .long sys_timer_settime /* 255 */
+ .long sys_timer_gettime
+ .long sys_timer_getoverrun
+ .long sys_timer_delete
+ .long sys_clock_settime
+ .long sys_clock_gettime /* 260 */
+ .long sys_clock_getres
+ .long sys_clock_nanosleep
+ .long sys_statfs64
+ .long sys_fstatfs64
+ .long sys_tgkill /* 265 */
+ .long sys_utimes
+ .long sys_fadvise64_64
+ .long sys_mbind
+ .long sys_get_mempolicy
+ .long sys_set_mempolicy /* 270 */
+ .long sys_mq_open
+ .long sys_mq_unlink
+ .long sys_mq_timedsend
+ .long sys_mq_timedreceive
+ .long sys_mq_notify /* 275 */
+ .long sys_mq_getsetattr
+ .long sys_waitid
+ .long sys_ni_syscall /* for sys_vserver */
+ .long sys_add_key
+ .long sys_request_key /* 280 */
+ .long sys_keyctl
+
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
new file mode 100644
index 00000000000..7cd6de17c20
--- /dev/null
+++ b/arch/m68k/kernel/head.S
@@ -0,0 +1,3940 @@
+/* -*- mode: asm -*-
+**
+** head.S -- This file contains the initial boot code for the
+** Linux/68k kernel.
+**
+** Copyright 1993 by Hamish Macdonald
+**
+** 68040 fixes by Michael Rausch
+** 68060 fixes by Roman Hodek
+** MMU cleanup by Randy Thelen
+** Final MMU cleanup by Roman Zippel
+**
+** Atari support by Andreas Schwab, using ideas of Robert de Vries
+** and Bjoern Brauel
+** VME Support by Richard Hirst
+**
+** 94/11/14 Andreas Schwab: put kernel at PAGESIZE
+** 94/11/18 Andreas Schwab: remove identity mapping of STRAM for Atari
+** ++ Bjoern & Roman: ATARI-68040 support for the Medusa
+** 95/11/18 Richard Hirst: Added MVME166 support
+** 96/04/26 Guenther Kelleter: fixed identity mapping for Falcon with
+** Magnum- and FX-alternate ram
+** 98/04/25 Phil Blundell: added HP300 support
+** 1998/08/30 David Kilzer: Added support for font_desc structures
+** for linux-2.1.115
+** 9/02/11 Richard Zidlicky: added Q40 support (initial version 99/01/01)
+** 2004/05/13 Kars de Jong: Finalised HP300 support
+**
+** This file is subject to the terms and conditions of the GNU General Public
+** License. See the file README.legal in the main directory of this archive
+** for more details.
+**
+*/
+
+/*
+ * Linux startup code.
+ *
+ * At this point, the boot loader has:
+ * Disabled interrupts
+ * Disabled caches
+ * Put us in supervisor state.
+ *
+ * The kernel setup code takes the following steps:
+ * . Raise interrupt level
+ * . Set up initial kernel memory mapping.
+ * . This sets up a mapping of the 4M of memory the kernel is located in.
+ * . It also does a mapping of any initial machine specific areas.
+ * . Enable the MMU
+ * . Enable cache memories
+ * . Jump to kernel startup
+ *
+ * Much of the file restructuring was to accomplish:
+ * 1) Remove register dependency throughout the file.
+ * 2) Increase use of subroutines to perform functions
+ * 3) Increase readability of the code
+ *
+ * Of course, readability is a subjective issue, so it will never be
+ * argued that that goal was accomplished. It was merely a goal.
+ * A key way to help make code more readable is to give good
+ * documentation. So, the first thing you will find is exhaustive
+ * write-ups on the structure of the file, and the features of the
+ * functional subroutines.
+ *
+ * General Structure:
+ * ------------------
+ * Without a doubt the single largest chunk of head.S is spent
+ * mapping the kernel and I/O physical space into the logical range
+ * for the kernel.
+ * There are new subroutines and data structures to make MMU
+ * support cleaner and easier to understand.
+ * First, you will find a routine called "mmu_map" which maps
+ * a logical to a physical region for some length given a cache
+ * type on behalf of the caller. This routine makes writing the
+ * actual per-machine specific code very simple.
+ * A central part of the code, but not a subroutine in itself,
+ * is the mmu_init code which is broken down into mapping the kernel
+ * (the same for all machines) and mapping machine-specific I/O
+ * regions.
+ * Also, there will be a description of engaging the MMU and
+ * caches.
+ * You will notice that there is a chunk of code which
+ * can emit the entire MMU mapping of the machine. This is present
+ * only in debug modes and can be very helpful.
+ * Further, there is a new console driver in head.S that is
+ * also only engaged in debug mode. Currently, it's only supported
+ * on the Macintosh class of machines. However, it is hoped that
+ * others will plug-in support for specific machines.
+ *
+ * ######################################################################
+ *
+ * mmu_map
+ * -------
+ * mmu_map was written for two key reasons. First, it was clear
+ * that it was very difficult to read the previous code for mapping
+ * regions of memory. Second, the Macintosh required such extensive
+ * memory allocations that it didn't make sense to propagate the
+ * existing code any further.
+ * mmu_map requires some parameters:
+ *
+ * mmu_map (logical, physical, length, cache_type)
+ *
+ * While this essentially describes the function in the abstract, you'll
+ * find a more in-depth description of the other parameters at the implementation site.
+ *
+ * mmu_get_root_table_entry
+ * ------------------------
+ * mmu_get_ptr_table_entry
+ * -----------------------
+ * mmu_get_page_table_entry
+ * ------------------------
+ *
+ * These routines are used by other mmu routines to get a pointer into
+ * a table, allocating a new table if necessary. They work basically
+ * like pmd_alloc() and pte_alloc() in <asm/pgtable.h>. The root table
+ * of course only needs to be allocated once, in mmu_get_root_table_entry,
+ * which is also where some mmu-specific initialization is done. The second page
+ * at the start of the kernel (the first page is unmapped later) is used for
+ * the kernel_pg_dir. It must be at a position known at link time (as it's used
+ * to initialize the init task struct) and since it needs special cache
+ * settings, it is easiest to use this page; the rest of the page is used
+ * for further pointer tables.
+ * mmu_get_page_table_entry always allocates a whole page for page tables; this
+ * means 1024 pages and so 4MB of memory can be mapped. It doesn't make sense
+ * to manage page tables in smaller pieces as nearly all mappings have that
+ * size.
+ *
+ * ######################################################################
+ *
+ *
+ * ######################################################################
+ *
+ * mmu_engage
+ * ----------
+ * Thanks to a small helper routine, enabling the mmu got quite simple
+ * and there is only one way left. mmu_engage makes a completely new mapping
+ * that includes only the absolute minimum needed to be able to jump to the final
+ * position and to restore the original mapping.
+ * As this code doesn't need a transparent translation register anymore, this
+ * means all registers are free to be used by machines that need them for
+ * other purposes.
+ *
+ * ######################################################################
+ *
+ * mmu_print
+ * ---------
+ * This algorithm will print out the page tables of the system as
+ * appropriate for an 030 or an 040. This is useful for debugging purposes
+ * and as such is enclosed in #ifdef MMU_PRINT/#endif clauses.
+ *
+ * ######################################################################
+ *
+ * console_init
+ * ------------
+ * The console is also able to be turned off. The console in head.S
+ * is specifically for debugging and can be very useful. It is surrounded by
+ * #ifdef CONSOLE/#endif clauses so it doesn't have to ship in known-good
+ * kernels. Its basic algorithm is to determine the size of the screen
+ * (in height/width and bit depth) and then use that information for
+ * displaying an 8x8 font or an 8x16 (widthxheight). I prefer the 8x8 for
+ * debugging so I can see more good data. But it was trivial to add support
+ * for both fonts, so I included it.
+ * Also, the algorithm for plotting pixels is abstracted so that in
+ * theory other platforms could add support for different kinds of frame
+ * buffers. This could be very useful.
+ *
+ * console_put_penguin
+ * -------------------
+ * An important part of any Linux bring up is the penguin and there's
+ * nothing like getting the Penguin on the screen! This algorithm will work
+ * on any machine for which there is a console_plot_pixel.
+ *
+ * console_scroll
+ * --------------
+ * My hope is that the scroll algorithm does the right thing on the
+ * various platforms, but it wouldn't be hard to add the test conditions
+ * and new code if it doesn't.
+ *
+ * console_putc
+ * -------------
+ *
+ * ######################################################################
+ *
+ * Register usage has been greatly simplified within head.S. Every subroutine
+ * saves and restores all registers that it modifies (except, of course, any
+ * register it returns a value in). So the only register that needs to be initialized
+ * is the stack pointer.
+ * All other init code and data is now placed in the init section, so it will
+ * be automatically freed at the end of the kernel initialization.
+ *
+ * ######################################################################
+ *
+ * options
+ * -------
+ * There are many options available in a build of this file. I've
+ * taken the time to describe them here to save you the time of searching
+ * for them and trying to understand what they mean.
+ *
+ * CONFIG_xxx: These are the obvious machine configuration defines created
+ * during configuration. These are defined in include/linux/autoconf.h.
+ *
+ * CONSOLE: There is support for head.S console in this file. This
+ * console can talk to a Mac frame buffer, but could easily be extended
+ * to support other platforms.
+ *
+ * TEST_MMU: This is a test harness for running on any given machine but
+ * getting an MMU dump for another class of machine. The classes of machines
+ * that can be tested are any of the makes (Atari, Amiga, Mac, VME, etc.)
+ * and any of the models (030, 040, 060, etc.).
+ *
+ * NOTE: TEST_MMU is NOT permanent! It is scheduled to be removed
+ * when head.S boots on Atari, Amiga, Macintosh, and VME
+ * machines. At that point the underlying logic will be
+ * believed to be solid enough to be trusted, and TEST_MMU
+ * can be dropped. Do note that that will clean up the
+ * head.S code significantly as large blocks of #if/#else
+ * clauses can be removed.
+ *
+ * MMU_NOCACHE_KERNEL: On the Macintosh platform there was an inquiry into
+ * determining why devices don't appear to work. A test case was to remove
+ * the cacheability of the kernel bits.
+ *
+ * MMU_PRINT: There is a routine built into head.S that can display the
+ * MMU data structures. It outputs its result through the serial_putc
+ * interface. So wherever that winds up driving data, that's where the
+ * mmu struct will appear. On the Macintosh that's typically the console.
+ *
+ * SERIAL_DEBUG: There is a series of putc() macro statements
+ * scattered throughout the code to report progress and status to the
+ * person sitting at the console. This constant determines whether those
+ * are used.
+ *
+ * DEBUG: This is the standard DEBUG flag that can be set for building
+ * the kernel. It has the effect of adding additional tests into
+ * the code.
+ *
+ * FONT_6x11:
+ * FONT_8x8:
+ * FONT_8x16:
+ * In theory these could be determined at run time or handed
+ * over by the booter. But, let's be real, it's a fine hard
+ * coded value. (But, you will notice the code is run-time
+ * flexible!) A pointer to the font's struct font_desc
+ * is kept locally in Lconsole_font. It is used to determine
+ * font size information dynamically.
+ *
+ * Atari constants:
+ * USE_PRINTER: Use the printer port for serial debug.
+ * USE_SCC_B: Use the SCC port A (Serial2) for serial debug.
+ * USE_SCC_A: Use the SCC port B (Modem2) for serial debug.
+ * USE_MFP: Use the ST-MFP port (Modem1) for serial debug.
+ *
+ * Macintosh constants:
+ * MAC_SERIAL_DEBUG: Turns on serial debug output for the Macintosh.
+ * MAC_USE_SCC_A: Use the SCC port A (modem) for serial debug.
+ * MAC_USE_SCC_B: Use the SCC port B (printer) for serial debug (default).
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/bootinfo.h>
+#include <asm/setup.h>
+#include <asm/entry.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/offsets.h>
+
+#ifdef CONFIG_MAC
+
+#include <asm/machw.h>
+
+/*
+ * Macintosh console support
+ */
+
+#define CONSOLE
+#define CONSOLE_PENGUIN
+
+/*
+ * Macintosh serial debug support; outputs boot info to the printer
+ * and/or modem serial ports
+ */
+#undef MAC_SERIAL_DEBUG
+
+/*
+ * Macintosh serial debug port selection; define one or both;
+ * requires MAC_SERIAL_DEBUG to be defined
+ */
+#define MAC_USE_SCC_A /* Macintosh modem serial port */
+#define MAC_USE_SCC_B /* Macintosh printer serial port */
+
+#endif /* CONFIG_MAC */
+
+#undef MMU_PRINT
+#undef MMU_NOCACHE_KERNEL
+#define SERIAL_DEBUG
+#undef DEBUG
+
+/*
+ * For the head.S console, there are three supported fonts, 6x11, 8x16 and 8x8.
+ * The 8x8 font is harder to read but fits more on the screen.
+ */
+#define FONT_8x8 /* default */
+/* #define FONT_8x16 */ /* 2nd choice */
+/* #define FONT_6x11 */ /* 3rd choice */
+
+.globl kernel_pg_dir
+.globl availmem
+.globl m68k_pgtable_cachemode
+.globl m68k_supervisor_cachemode
+#ifdef CONFIG_MVME16x
+.globl mvme_bdid
+#endif
+#ifdef CONFIG_Q40
+.globl q40_mem_cptr
+#endif
+
+CPUTYPE_040 = 1 /* indicates an 040 */
+CPUTYPE_060 = 2 /* indicates an 060 */
+CPUTYPE_0460 = 3 /* if either above are set, this is set */
+CPUTYPE_020 = 4 /* indicates an 020 */
+
+/* Translation control register */
+TC_ENABLE = 0x8000
+TC_PAGE8K = 0x4000
+TC_PAGE4K = 0x0000
+
+/* Transparent translation registers */
+TTR_ENABLE = 0x8000 /* enable transparent translation */
+TTR_ANYMODE = 0x4000 /* user and kernel mode access */
+TTR_KERNELMODE = 0x2000 /* only kernel mode access */
+TTR_USERMODE = 0x0000 /* only user mode access */
+TTR_CI = 0x0400 /* inhibit cache */
+TTR_RW = 0x0200 /* read/write mode */
+TTR_RWM = 0x0100 /* read/write mask */
+TTR_FCB2 = 0x0040 /* function code base bit 2 */
+TTR_FCB1 = 0x0020 /* function code base bit 1 */
+TTR_FCB0 = 0x0010 /* function code base bit 0 */
+TTR_FCM2 = 0x0004 /* function code mask bit 2 */
+TTR_FCM1 = 0x0002 /* function code mask bit 1 */
+TTR_FCM0 = 0x0001 /* function code mask bit 0 */
+
+/* Cache Control registers */
+CC6_ENABLE_D = 0x80000000 /* enable data cache (680[46]0) */
+CC6_FREEZE_D = 0x40000000 /* freeze data cache (68060) */
+CC6_ENABLE_SB = 0x20000000 /* enable store buffer (68060) */
+CC6_PUSH_DPI = 0x10000000 /* disable CPUSH invalidation (68060) */
+CC6_HALF_D = 0x08000000 /* half-cache mode for data cache (68060) */
+CC6_ENABLE_B = 0x00800000 /* enable branch cache (68060) */
+CC6_CLRA_B = 0x00400000 /* clear all entries in branch cache (68060) */
+CC6_CLRU_B = 0x00200000 /* clear user entries in branch cache (68060) */
+CC6_ENABLE_I = 0x00008000 /* enable instruction cache (680[46]0) */
+CC6_FREEZE_I = 0x00004000 /* freeze instruction cache (68060) */
+CC6_HALF_I = 0x00002000 /* half-cache mode for instruction cache (68060) */
+CC3_ALLOC_WRITE = 0x00002000 /* write allocate mode(68030) */
+CC3_ENABLE_DB = 0x00001000 /* enable data burst (68030) */
+CC3_CLR_D = 0x00000800 /* clear data cache (68030) */
+CC3_CLRE_D = 0x00000400 /* clear entry in data cache (68030) */
+CC3_FREEZE_D = 0x00000200 /* freeze data cache (68030) */
+CC3_ENABLE_D = 0x00000100 /* enable data cache (68030) */
+CC3_ENABLE_IB = 0x00000010 /* enable instruction burst (68030) */
+CC3_CLR_I = 0x00000008 /* clear instruction cache (68030) */
+CC3_CLRE_I = 0x00000004 /* clear entry in instruction cache (68030) */
+CC3_FREEZE_I = 0x00000002 /* freeze instruction cache (68030) */
+CC3_ENABLE_I = 0x00000001 /* enable instruction cache (68030) */
+
+/* Miscellaneous definitions */
+PAGESIZE = 4096
+PAGESHIFT = 12
+
+ROOT_TABLE_SIZE = 128
+PTR_TABLE_SIZE = 128
+PAGE_TABLE_SIZE = 64
+ROOT_INDEX_SHIFT = 25
+PTR_INDEX_SHIFT = 18
+PAGE_INDEX_SHIFT = 12
+
+#ifdef DEBUG
+/* When debugging use readable names for labels */
+#ifdef __STDC__
+#define L(name) .head.S.##name
+#else
+#define L(name) .head.S./**/name
+#endif
+#else
+#ifdef __STDC__
+#define L(name) .L##name
+#else
+#define L(name) .L/**/name
+#endif
+#endif
+
+/* The __INITDATA stuff is a no-op when ftrace or kgdb are turned on */
+#ifndef __INITDATA
+#define __INITDATA .data
+#define __FINIT .previous
+#endif
+
+/* Several macros to make the writing of subroutines easier:
+ * - func_start marks the beginning of the routine: it sets up the frame
+ *   pointer and saves the registers, and it also defines another macro
+ *   to automatically restore the registers again.
+ * - func_return marks the end of the routine and simply calls the prepared
+ *   macro to restore registers and jump back to the caller.
+ * - func_define generates another macro to automatically put arguments
+ *   onto the stack, call the subroutine and clean up the stack again.
+ */
+
+/* Within subroutines these macros can be used to access the arguments
+ * on the stack. With STACK some allocated memory on the stack can be
+ * accessed and ARG0 points to the return address (used by mmu_engage).
+ */
+#define STACK %a6@(stackstart)
+#define ARG0 %a6@(4)
+#define ARG1 %a6@(8)
+#define ARG2 %a6@(12)
+#define ARG3 %a6@(16)
+#define ARG4 %a6@(20)
+
+.macro func_start name,saveregs,stack=0
+L(\name):
+ linkw %a6,#-\stack
+ moveml \saveregs,%sp@-
+.set stackstart,-\stack
+
+.macro func_return_\name
+ moveml %sp@+,\saveregs
+ unlk %a6
+ rts
+.endm
+.endm
+
+.macro func_return name
+ func_return_\name
+.endm
+
+.macro func_call name
+ jbsr L(\name)
+.endm
+
+.macro move_stack nr,arg1,arg2,arg3,arg4
+.if \nr
+ move_stack "(\nr-1)",\arg2,\arg3,\arg4
+ movel \arg1,%sp@-
+.endif
+.endm
+
+.macro func_define name,nr=0
+.macro \name arg1,arg2,arg3,arg4
+ move_stack \nr,\arg1,\arg2,\arg3,\arg4
+ func_call \name
+.if \nr
+ lea %sp@(\nr*4),%sp
+.endif
+.endm
+.endm
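To make the macro machinery above concrete, here is an illustrative expansion (not part of the patch; log, phys, len and cache stand for whatever a real caller passes). After "func_define mmu_map,4", a call written as

    mmu_map #log,#phys,#len,#cache

expands to roughly

    movel   #cache,%sp@-            | move_stack pushes the arguments
    movel   #len,%sp@-              | last-to-first, so in the callee
    movel   #phys,%sp@-             | ARG1 refers to the first argument
    movel   #log,%sp@-
    jbsr    L(mmu_map)
    lea     %sp@(4*4),%sp           | drop the four arguments again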
+
+func_define mmu_map,4
+func_define mmu_map_tt,4
+func_define mmu_fixup_page_mmu_cache,1
+func_define mmu_temp_map,2
+func_define mmu_engage
+func_define mmu_get_root_table_entry,1
+func_define mmu_get_ptr_table_entry,2
+func_define mmu_get_page_table_entry,2
+func_define mmu_print
+func_define get_new_page
+#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
+func_define set_leds
+#endif
+
+.macro mmu_map_eq arg1,arg2,arg3
+ mmu_map \arg1,\arg1,\arg2,\arg3
+.endm
+
+.macro get_bi_record record
+ pea \record
+ func_call get_bi_record
+ addql #4,%sp
+.endm
+
+func_define serial_putc,1
+func_define console_putc,1
+
+func_define console_init
+func_define console_put_stats
+func_define console_put_penguin
+func_define console_plot_pixel,3
+func_define console_scroll
+
+.macro putc ch
+#if defined(CONSOLE) || defined(SERIAL_DEBUG)
+ pea \ch
+#endif
+#ifdef CONSOLE
+ func_call console_putc
+#endif
+#ifdef SERIAL_DEBUG
+ func_call serial_putc
+#endif
+#if defined(CONSOLE) || defined(SERIAL_DEBUG)
+ addql #4,%sp
+#endif
+.endm
+
+.macro dputc ch
+#ifdef DEBUG
+ putc \ch
+#endif
+.endm
+
+func_define putn,1
+
+.macro dputn nr
+#ifdef DEBUG
+ putn \nr
+#endif
+.endm
+
+.macro puts string
+#if defined(CONSOLE) || defined(SERIAL_DEBUG)
+ __INITDATA
+.Lstr\@:
+ .string "\string"
+ __FINIT
+ pea %pc@(.Lstr\@)
+ func_call puts
+ addql #4,%sp
+#endif
+.endm
+
+.macro dputs string
+#ifdef DEBUG
+ puts "\string"
+#endif
+.endm
+
+#define is_not_amiga(lab) cmpl &MACH_AMIGA,%pc@(m68k_machtype); jne lab
+#define is_not_atari(lab) cmpl &MACH_ATARI,%pc@(m68k_machtype); jne lab
+#define is_not_mac(lab) cmpl &MACH_MAC,%pc@(m68k_machtype); jne lab
+#define is_not_mvme147(lab) cmpl &MACH_MVME147,%pc@(m68k_machtype); jne lab
+#define is_not_mvme16x(lab) cmpl &MACH_MVME16x,%pc@(m68k_machtype); jne lab
+#define is_not_bvme6000(lab) cmpl &MACH_BVME6000,%pc@(m68k_machtype); jne lab
+#define is_mvme147(lab) cmpl &MACH_MVME147,%pc@(m68k_machtype); jeq lab
+#define is_mvme16x(lab) cmpl &MACH_MVME16x,%pc@(m68k_machtype); jeq lab
+#define is_bvme6000(lab) cmpl &MACH_BVME6000,%pc@(m68k_machtype); jeq lab
+#define is_not_hp300(lab) cmpl &MACH_HP300,%pc@(m68k_machtype); jne lab
+#define is_not_apollo(lab) cmpl &MACH_APOLLO,%pc@(m68k_machtype); jne lab
+#define is_not_q40(lab) cmpl &MACH_Q40,%pc@(m68k_machtype); jne lab
+#define is_not_sun3x(lab) cmpl &MACH_SUN3X,%pc@(m68k_machtype); jne lab
+
+#define hasnt_leds(lab) cmpl &MACH_HP300,%pc@(m68k_machtype); \
+ jeq 42f; \
+ cmpl &MACH_APOLLO,%pc@(m68k_machtype); \
+ jne lab ;\
+ 42:\
+
+#define is_040_or_060(lab) btst &CPUTYPE_0460,%pc@(L(cputype)+3); jne lab
+#define is_not_040_or_060(lab) btst &CPUTYPE_0460,%pc@(L(cputype)+3); jeq lab
+#define is_040(lab) btst &CPUTYPE_040,%pc@(L(cputype)+3); jne lab
+#define is_060(lab) btst &CPUTYPE_060,%pc@(L(cputype)+3); jne lab
+#define is_not_060(lab) btst &CPUTYPE_060,%pc@(L(cputype)+3); jeq lab
+#define is_020(lab) btst &CPUTYPE_020,%pc@(L(cputype)+3); jne lab
+#define is_not_020(lab) btst &CPUTYPE_020,%pc@(L(cputype)+3); jeq lab
+
+/* On the HP300 we use the on-board LEDs for debug output before
+ the console is running. Writing a 1 bit turns the corresponding LED
+ _off_ - on the 340 bit 7 is towards the back panel of the machine. */
+.macro leds mask
+#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
+ hasnt_leds(.Lled\@)
+ pea \mask
+ func_call set_leds
+ addql #4,%sp
+.Lled\@:
+#endif
+.endm
+
+.text
+ENTRY(_stext)
+/*
+ * Version numbers of the bootinfo interface
+ * The area from _stext to _start will later be used as kernel pointer table
+ * The area from _stext to _start will later be used as a kernel pointer table
+ bras 1f /* Jump over bootinfo version numbers */
+
+ .long BOOTINFOV_MAGIC
+ .long MACH_AMIGA, AMIGA_BOOTI_VERSION
+ .long MACH_ATARI, ATARI_BOOTI_VERSION
+ .long MACH_MVME147, MVME147_BOOTI_VERSION
+ .long MACH_MVME16x, MVME16x_BOOTI_VERSION
+ .long MACH_BVME6000, BVME6000_BOOTI_VERSION
+ .long MACH_MAC, MAC_BOOTI_VERSION
+ .long MACH_Q40, Q40_BOOTI_VERSION
+ .long MACH_HP300, HP300_BOOTI_VERSION
+ .long 0
+1: jra __start
+
+.equ kernel_pg_dir,_stext
+
+.equ .,_stext+PAGESIZE
+
+ENTRY(_start)
+ jra __start
+__INIT
+ENTRY(__start)
+/*
+ * Setup initial stack pointer
+ */
+ lea %pc@(_stext),%sp
+
+/*
+ * Record the CPU and machine type.
+ */
+ get_bi_record BI_MACHTYPE
+ lea %pc@(m68k_machtype),%a1
+ movel %a0@,%a1@
+
+ get_bi_record BI_FPUTYPE
+ lea %pc@(m68k_fputype),%a1
+ movel %a0@,%a1@
+
+ get_bi_record BI_MMUTYPE
+ lea %pc@(m68k_mmutype),%a1
+ movel %a0@,%a1@
+
+ get_bi_record BI_CPUTYPE
+ lea %pc@(m68k_cputype),%a1
+ movel %a0@,%a1@
+
+ leds 0x1
+
+#ifdef CONFIG_MAC
+/*
+ * For Macintosh, we need to determine the display parameters early (at least
+ * while debugging it).
+ */
+
+ is_not_mac(L(test_notmac))
+
+ get_bi_record BI_MAC_VADDR
+ lea %pc@(L(mac_videobase)),%a1
+ movel %a0@,%a1@
+
+ get_bi_record BI_MAC_VDEPTH
+ lea %pc@(L(mac_videodepth)),%a1
+ movel %a0@,%a1@
+
+ get_bi_record BI_MAC_VDIM
+ lea %pc@(L(mac_dimensions)),%a1
+ movel %a0@,%a1@
+
+ get_bi_record BI_MAC_VROW
+ lea %pc@(L(mac_rowbytes)),%a1
+ movel %a0@,%a1@
+
+#ifdef MAC_SERIAL_DEBUG
+ get_bi_record BI_MAC_SCCBASE
+ lea %pc@(L(mac_sccbase)),%a1
+ movel %a0@,%a1@
+#endif /* MAC_SERIAL_DEBUG */
+
+#if 0
+ /*
+ * Clear the screen
+ */
+ lea %pc@(L(mac_videobase)),%a0
+ movel %a0@,%a1
+ lea %pc@(L(mac_dimensions)),%a0
+ movel %a0@,%d1
+ swap %d1 /* #rows is high bytes */
+ andl #0xFFFF,%d1 /* rows */
+ subl #10,%d1
+ lea %pc@(L(mac_rowbytes)),%a0
+loopy2:
+ movel %a0@,%d0
+ subql #1,%d0
+loopx2:
+ moveb #0x55, %a1@+
+ dbra %d0,loopx2
+ dbra %d1,loopy2
+#endif
+
+L(test_notmac):
+#endif /* CONFIG_MAC */
+
+
+/*
+ * There are ultimately two pieces of information we want for all kinds of
+ * processors: CpuType and CacheBits. The CPUTYPE was passed in from the booter
+ * and is converted here from a booter type definition to a separate bit
+ * number which allows for the standard is_0x0 macro tests.
+ */
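+
+/*
+ * Editor's note: after the conversion below, L(cputype) has
+ *	the CPUTYPE_060 and CPUTYPE_0460 bits set on a 68060,
+ *	the CPUTYPE_040 and CPUTYPE_0460 bits set on a 68040,
+ *	the CPUTYPE_020 bit set on a 68020, and no bits set on a 68030,
+ * which is exactly what the is_040_or_060/is_040/is_060/is_020 macros test.
+ */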
+ movel %pc@(m68k_cputype),%d0
+ /*
+ * Assume it's an 030
+ */
+ clrl %d1
+
+ /*
+ * Test the BootInfo cputype for 060
+ */
+ btst #CPUB_68060,%d0
+ jeq 1f
+ bset #CPUTYPE_060,%d1
+ bset #CPUTYPE_0460,%d1
+ jra 3f
+1:
+ /*
+ * Test the BootInfo cputype for 040
+ */
+ btst #CPUB_68040,%d0
+ jeq 2f
+ bset #CPUTYPE_040,%d1
+ bset #CPUTYPE_0460,%d1
+ jra 3f
+2:
+ /*
+ * Test the BootInfo cputype for 020
+ */
+ btst #CPUB_68020,%d0
+ jeq 3f
+ bset #CPUTYPE_020,%d1
+ jra 3f
+3:
+ /*
+ * Record the cpu type
+ */
+ lea %pc@(L(cputype)),%a0
+ movel %d1,%a0@
+
+ /*
+ * NOTE:
+ *
+ * Now the macros are valid:
+ * is_040_or_060
+ * is_not_040_or_060
+ * is_040
+ * is_060
+ * is_not_060
+ */
+
+ /*
+ * Determine the cache mode for pages holding MMU tables
+ * and for supervisor mode, unused for '020 and '030
+ */
+ clrl %d0
+ clrl %d1
+
+ is_not_040_or_060(L(save_cachetype))
+
+ /*
+ * '040 or '060
+ * d1 := cacheable write-through
+ * NOTE: The 68040 manual strongly recommends non-cached for MMU tables,
+ * but we have been using write-through since at least 2.0.29 so I
+ * guess it is OK.
+ */
+#ifdef CONFIG_060_WRITETHROUGH
+ /*
+ * If this is a 68060 board using drivers with cache coherency
+ * problems, then supervisor memory accesses need to be write-through
+ * also; otherwise, we want copyback.
+ */
+
+ is_not_060(1f)
+ movel #_PAGE_CACHE040W,%d0
+ jra L(save_cachetype)
+#endif /* CONFIG_060_WRITETHROUGH */
+1:
+ movew #_PAGE_CACHE040,%d0
+
+ movel #_PAGE_CACHE040W,%d1
+
+L(save_cachetype):
+ /* Save cache mode for supervisor mode and page tables
+ */
+ lea %pc@(m68k_supervisor_cachemode),%a0
+ movel %d0,%a0@
+ lea %pc@(m68k_pgtable_cachemode),%a0
+ movel %d1,%a0@
+
+/*
+ * raise interrupt level
+ */
+ movew #0x2700,%sr
+
+/*
+ If running on an Atari, determine the I/O base of the
+ serial port and test if we are running on a Medusa or Hades.
+ This test is necessary here, because on the Hades the serial
+ port is only accessible in the high I/O memory area.
+
+ The test whether it is a Medusa is done by writing to the byte at
+ phys. 0x0. This should result in a bus error on all other machines.
+
+ ...should, but doesn't. The Afterburner040 for the Falcon has the
+ same behaviour (0x0..0x7 is not a ROM shadow). So we have to do
+ another test to distinguish Medusa and AB040. This is a
+ read attempt at phys. 0x00ff82fe that should bus error on a Falcon
+ (+AB040), but is in the range where the Medusa always asserts DTACK.
+
+ The test for the Hades is done by reading address 0xb0000000. This
+ should give a bus error on the Medusa.
+ */
+
+#ifdef CONFIG_ATARI
+ is_not_atari(L(notypetest))
+
+ /* get special machine type (Medusa/Hades/AB40) */
+ moveq #0,%d3 /* default if tag doesn't exist */
+ get_bi_record BI_ATARI_MCH_TYPE
+ tstl %d0
+ jbmi 1f
+ movel %a0@,%d3
+ lea %pc@(atari_mch_type),%a0
+ movel %d3,%a0@
+1:
+ /* On the Hades, the iobase must be set up before opening the
+ * serial port. There are no I/O regs at 0x00ffxxxx at all. */
+ moveq #0,%d0
+ cmpl #ATARI_MACH_HADES,%d3
+ jbne 1f
+ movel #0xff000000,%d0 /* Hades I/O base addr: 0xff000000 */
+1: lea %pc@(L(iobase)),%a0
+ movel %d0,%a0@
+
+L(notypetest):
+#endif
+
+#ifdef CONFIG_VME
+ is_mvme147(L(getvmetype))
+ is_bvme6000(L(getvmetype))
+ is_not_mvme16x(L(gvtdone))
+
+ /* See if the loader has specified the BI_VME_TYPE tag. Recent
+ * versions of VMELILO and TFTPLILO do this. We have to do this
+ * early so we know how to handle console output. If the tag
+ * doesn't exist then we use the Bug for output on MVME16x.
+ */
+L(getvmetype):
+ get_bi_record BI_VME_TYPE
+ tstl %d0
+ jbmi 1f
+ movel %a0@,%d3
+ lea %pc@(vme_brdtype),%a0
+ movel %d3,%a0@
+1:
+#ifdef CONFIG_MVME16x
+ is_not_mvme16x(L(gvtdone))
+
+ /* Need to get the BRD_ID info to differentiate between 162, 167,
+ * etc. This is available as a BI_VME_BRDINFO tag with later
+ * versions of VMELILO and TFTPLILO, otherwise we call the Bug.
+ */
+ get_bi_record BI_VME_BRDINFO
+ tstl %d0
+ jpl 1f
+
+ /* Get pointer to board ID data from Bug */
+ movel %d2,%sp@-
+ trap #15
+ .word 0x70 /* trap 0x70 - .BRD_ID */
+ movel %sp@+,%a0
+1:
+ lea %pc@(mvme_bdid),%a1
+ /* Structure is 32 bytes long */
+ movel %a0@+,%a1@+
+ movel %a0@+,%a1@+
+ movel %a0@+,%a1@+
+ movel %a0@+,%a1@+
+ movel %a0@+,%a1@+
+ movel %a0@+,%a1@+
+ movel %a0@+,%a1@+
+ movel %a0@+,%a1@+
+#endif
+
+L(gvtdone):
+
+#endif
+
+#ifdef CONFIG_HP300
+ is_not_hp300(L(nothp))
+
+ /* Get the address of the UART for serial debugging */
+ get_bi_record BI_HP300_UART_ADDR
+ tstl %d0
+ jbmi 1f
+ movel %a0@,%d3
+ lea %pc@(L(uartbase)),%a0
+ movel %d3,%a0@
+ get_bi_record BI_HP300_UART_SCODE
+ tstl %d0
+ jbmi 1f
+ movel %a0@,%d3
+ lea %pc@(L(uart_scode)),%a0
+ movel %d3,%a0@
+1:
+L(nothp):
+#endif
+
+/*
+ * Initialize serial port
+ */
+ jbsr L(serial_init)
+
+/*
+ * Initialize console
+ */
+#ifdef CONFIG_MAC
+ is_not_mac(L(nocon))
+#ifdef CONSOLE
+ console_init
+#ifdef CONSOLE_PENGUIN
+ console_put_penguin
+#endif /* CONSOLE_PENGUIN */
+ console_put_stats
+#endif /* CONSOLE */
+L(nocon):
+#endif /* CONFIG_MAC */
+
+
+ putc '\n'
+ putc 'A'
+ leds 0x2
+ dputn %pc@(L(cputype))
+ dputn %pc@(m68k_supervisor_cachemode)
+ dputn %pc@(m68k_pgtable_cachemode)
+ dputc '\n'
+
+/*
+ * Save physical start address of kernel
+ */
+ lea %pc@(L(phys_kernel_start)),%a0
+ lea %pc@(_stext),%a1
+ subl #_stext,%a1
+ addl #PAGE_OFFSET,%a1
+ movel %a1,%a0@
+
+ putc 'B'
+
+ leds 0x4
+
+/*
+ * mmu_init
+ *
+ * This block of code does what's necessary to map in the various kinds
+ * of machines for execution of Linux.
+ * First map the first 4 MB of kernel code & data
+ */
+
+ mmu_map #PAGE_OFFSET,%pc@(L(phys_kernel_start)),#4*1024*1024,\
+ %pc@(m68k_supervisor_cachemode)
+
+ putc 'C'
+
+#ifdef CONFIG_AMIGA
+
+L(mmu_init_amiga):
+
+ is_not_amiga(L(mmu_init_not_amiga))
+/*
+ * mmu_init_amiga
+ */
+
+ putc 'D'
+
+ is_not_040_or_060(1f)
+
+ /*
+	 * 040: Map the 16Meg range at physical 0x0 to logical 0x8000.0000
+ */
+ mmu_map #0x80000000,#0,#0x01000000,#_PAGE_NOCACHE_S
+ /*
+ * Map the Zorro III I/O space with transparent translation
+ * for frame buffer memory etc.
+ */
+ mmu_map_tt #1,#0x40000000,#0x20000000,#_PAGE_NOCACHE_S
+
+ jbra L(mmu_init_done)
+
+1:
+ /*
+	 * 030: Map the 32Meg range at physical 0x0 to logical 0x8000.0000
+ */
+ mmu_map #0x80000000,#0,#0x02000000,#_PAGE_NOCACHE030
+ mmu_map_tt #1,#0x40000000,#0x20000000,#_PAGE_NOCACHE030
+
+ jbra L(mmu_init_done)
+
+L(mmu_init_not_amiga):
+#endif
+
+#ifdef CONFIG_ATARI
+
+L(mmu_init_atari):
+
+ is_not_atari(L(mmu_init_not_atari))
+
+ putc 'E'
+
+/* On the Atari, we map the I/O region (phys. 0x00ffxxxx) by mapping
+ the last 16 MB of virtual address space to the first 16 MB (i.e.
+ 0xffxxxxxx -> 0x00xxxxxx). For this, an additional pointer table is
+   needed. I/O ranges are marked non-cacheable.
+
+ For the Medusa it is better to map the I/O region transparently
+ (i.e. 0xffxxxxxx -> 0xffxxxxxx), because some I/O registers are
+ accessible only in the high area.
+
+ On the Hades all I/O registers are only accessible in the high
+ area.
+*/
+
+ /* I/O base addr for non-Medusa, non-Hades: 0x00000000 */
+ moveq #0,%d0
+ movel %pc@(atari_mch_type),%d3
+ cmpl #ATARI_MACH_MEDUSA,%d3
+ jbeq 2f
+ cmpl #ATARI_MACH_HADES,%d3
+ jbne 1f
+2: movel #0xff000000,%d0 /* Medusa/Hades base addr: 0xff000000 */
+1: movel %d0,%d3
+
+ is_040_or_060(L(spata68040))
+
+ /* Map everything non-cacheable, though not all parts really
+ * need to disable caches (crucial only for 0xff8000..0xffffff
+ * (standard I/O) and 0xf00000..0xf3ffff (IDE)). The remainder
+ * isn't really used, except for sometimes peeking into the
+ * ROMs (mirror at phys. 0x0), so caching isn't necessary for
+ * this. */
+ mmu_map #0xff000000,%d3,#0x01000000,#_PAGE_NOCACHE030
+
+ jbra L(mmu_init_done)
+
+L(spata68040):
+
+ mmu_map #0xff000000,%d3,#0x01000000,#_PAGE_NOCACHE_S
+
+ jbra L(mmu_init_done)
+
+L(mmu_init_not_atari):
+#endif
+
+#ifdef CONFIG_Q40
+ is_not_q40(L(notq40))
+ /*
+ * add transparent mapping for 0xff00 0000 - 0xffff ffff
+ * non-cached serialized etc..
+ * this includes master chip, DAC, RTC and ISA ports
+ * 0xfe000000-0xfeffffff is for screen and ROM
+ */
+
+ putc 'Q'
+
+ mmu_map_tt #0,#0xfe000000,#0x01000000,#_PAGE_CACHE040W
+ mmu_map_tt #1,#0xff000000,#0x01000000,#_PAGE_NOCACHE_S
+
+ jbra L(mmu_init_done)
+
+L(notq40):
+#endif
+
+#ifdef CONFIG_HP300
+ is_not_hp300(L(nothp300))
+
+ /* On the HP300, we map the ROM, INTIO and DIO regions (phys. 0x00xxxxxx)
+	 * by mapping 32MB (on 020/030) or 16 MB (on 040) from 0xf0xxxxxx -> 0x00xxxxxx.
+ * The ROM mapping is needed because the LEDs are mapped there too.
+ */
+
+ is_040(1f)
+
+ /*
+	 * 030: Map the 32Meg range at physical 0x0 to logical 0xf000.0000
+ */
+ mmu_map #0xf0000000,#0,#0x02000000,#_PAGE_NOCACHE030
+
+ jbra L(mmu_init_done)
+
+1:
+ /*
+	 * 040: Map the 16Meg range at physical 0x0 to logical 0xf000.0000
+ */
+ mmu_map #0xf0000000,#0,#0x01000000,#_PAGE_NOCACHE_S
+
+ jbra L(mmu_init_done)
+
+L(nothp300):
+#endif /* CONFIG_HP300 */
+
+#ifdef CONFIG_MVME147
+
+ is_not_mvme147(L(not147))
+
+ /*
+ * On MVME147 we have already created kernel page tables for
+ * 4MB of RAM at address 0, so now need to do a transparent
+ * mapping of the top of memory space. Make it 0.5GByte for now,
+ * so we can access on-board i/o areas.
+ */
+
+ mmu_map_tt #1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE030
+
+ jbra L(mmu_init_done)
+
+L(not147):
+#endif /* CONFIG_MVME147 */
+
+#ifdef CONFIG_MVME16x
+
+ is_not_mvme16x(L(not16x))
+
+ /*
+ * On MVME16x we have already created kernel page tables for
+ * 4MB of RAM at address 0, so now need to do a transparent
+ * mapping of the top of memory space. Make it 0.5GByte for now.
+ * Supervisor only access, so transparent mapping doesn't
+ * clash with User code virtual address space.
+	 * This covers IO devices, PROM and SRAM. The PROM and SRAM
+ * mapping is needed to allow 167Bug to run.
+ * IO is in the range 0xfff00000 to 0xfffeffff.
+ * PROM is 0xff800000->0xffbfffff and SRAM is
+ * 0xffe00000->0xffe1ffff.
+ */
+
+ mmu_map_tt #1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE_S
+
+ jbra L(mmu_init_done)
+
+L(not16x):
+#endif /* CONFIG_MVME162 | CONFIG_MVME167 */
+
+#ifdef CONFIG_BVME6000
+
+ is_not_bvme6000(L(not6000))
+
+ /*
+ * On BVME6000 we have already created kernel page tables for
+ * 4MB of RAM at address 0, so now need to do a transparent
+ * mapping of the top of memory space. Make it 0.5GByte for now,
+ * so we can access on-board i/o areas.
+ * Supervisor only access, so transparent mapping doesn't
+ * clash with User code virtual address space.
+ */
+
+ mmu_map_tt #1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE_S
+
+ jbra L(mmu_init_done)
+
+L(not6000):
+#endif /* CONFIG_BVME6000 */
+
+/*
+ * mmu_init_mac
+ *
+ * The Macintosh mappings are less clear.
+ *
+ * Even as of this writing, it is unclear how the
+ * Macintosh mappings will be done. However, as
+ * the first author of this code I'm proposing the
+ * following model:
+ *
+ * Map the kernel (that's already done),
+ * Map the I/O (on most machines that's the
+ *		0x5000.0000 ... 0x5300.0000 range),
+ * Map the video frame buffer using as few pages
+ *		as absolutely necessary (this requirement mostly stems from
+ * the fact that when the frame buffer is at
+ * 0x0000.0000 then we know there is valid RAM just
+ * above the screen that we don't want to waste!).
+ *
+ * By the way, if the frame buffer is at 0x0000.0000
+ * then the Macintosh is known as an RBV based Mac.
+ *
+ * By the way 2, the code currently maps in a bunch of
+ * regions. But I'd like to cut that out. (And move most
+ * of the mappings up into the kernel proper ... or only
+ * map what's necessary.)
+ */
+
+#ifdef CONFIG_MAC
+
+L(mmu_init_mac):
+
+ is_not_mac(L(mmu_init_not_mac))
+
+ putc 'F'
+
+ is_not_040_or_060(1f)
+
+ moveq #_PAGE_NOCACHE_S,%d3
+ jbra 2f
+1:
+ moveq #_PAGE_NOCACHE030,%d3
+2:
+ /*
+ * Mac Note: screen address of logical 0xF000.0000 -> <screen physical>
+ * we simply map the 4MB that contains the videomem
+ */
+
+ movel #VIDEOMEMMASK,%d0
+ andl %pc@(L(mac_videobase)),%d0
+
+ mmu_map #VIDEOMEMBASE,%d0,#VIDEOMEMSIZE,%d3
+ /* ROM from 4000 0000 to 4200 0000 (only for mac_reset()) */
+ mmu_map_eq #0x40000000,#0x02000000,%d3
+ /* IO devices (incl. serial port) from 5000 0000 to 5300 0000 */
+ mmu_map_eq #0x50000000,#0x03000000,%d3
+ /* Nubus slot space (video at 0xF0000000, rom at 0xF0F80000) */
+ mmu_map_tt #1,#0xf8000000,#0x08000000,%d3
+
+ jbra L(mmu_init_done)
+
+L(mmu_init_not_mac):
+#endif
+
+#ifdef CONFIG_SUN3X
+ is_not_sun3x(L(notsun3x))
+
+ /* oh, the pain.. We're gonna want the prom code after
+ * starting the MMU, so we copy the mappings, translating
+ * from 8k -> 4k pages as we go.
+ */
+
+ /* copy maps from 0xfee00000 to 0xff000000 */
+ movel #0xfee00000, %d0
+ moveq #ROOT_INDEX_SHIFT, %d1
+ lsrl %d1,%d0
+ mmu_get_root_table_entry %d0
+
+ movel #0xfee00000, %d0
+ moveq #PTR_INDEX_SHIFT, %d1
+ lsrl %d1,%d0
+ andl #PTR_TABLE_SIZE-1, %d0
+ mmu_get_ptr_table_entry %a0,%d0
+
+ movel #0xfee00000, %d0
+ moveq #PAGE_INDEX_SHIFT, %d1
+ lsrl %d1,%d0
+ andl #PAGE_TABLE_SIZE-1, %d0
+ mmu_get_page_table_entry %a0,%d0
+
+ /* this is where the prom page table lives */
+ movel 0xfefe00d4, %a1
+ movel %a1@, %a1
+
+ movel #((0x200000 >> 13)-1), %d1
+
+1:
+ movel %a1@+, %d3
+ movel %d3,%a0@+
+ addl #0x1000,%d3
+ movel %d3,%a0@+
+
+ dbra %d1,1b
+
+ /* setup tt1 for I/O */
+ mmu_map_tt #1,#0x40000000,#0x40000000,#_PAGE_NOCACHE_S
+ jbra L(mmu_init_done)
+
+L(notsun3x):
+#endif
+
+#ifdef CONFIG_APOLLO
+ is_not_apollo(L(notapollo))
+
+ putc 'P'
+ mmu_map #0x80000000,#0,#0x02000000,#_PAGE_NOCACHE030
+
+L(notapollo):
+ jbra L(mmu_init_done)
+#endif
+
+L(mmu_init_done):
+
+ putc 'G'
+ leds 0x8
+
+/*
+ * mmu_fixup
+ *
+ * On the 040 class machines, all pages that are used for the
+ * mmu have to be fixed up. According to Motorola, pages holding mmu
+ * tables should be non-cacheable on a '040 and write-through on a
+ * '060. But analysis of the reasons for this, and practical
+ * experience, showed that write-through also works on a '040.
+ *
+ * The memory allocated so far, from kernel_end to memory_start, is used
+ * for all kinds of tables; the cache attributes of those pages are
+ * fixed up now.
+ */
+L(mmu_fixup):
+
+ is_not_040_or_060(L(mmu_fixup_done))
+
+#ifdef MMU_NOCACHE_KERNEL
+ jbra L(mmu_fixup_done)
+#endif
+
+	/* first fix the page at the start of the kernel, which
+	 * also contains kernel_pg_dir.
+ */
+ movel %pc@(L(phys_kernel_start)),%d0
+ subl #PAGE_OFFSET,%d0
+ lea %pc@(_stext),%a0
+ subl %d0,%a0
+ mmu_fixup_page_mmu_cache %a0
+
+ movel %pc@(L(kernel_end)),%a0
+ subl %d0,%a0
+ movel %pc@(L(memory_start)),%a1
+ subl %d0,%a1
+ bra 2f
+1:
+ mmu_fixup_page_mmu_cache %a0
+ addw #PAGESIZE,%a0
+2:
+ cmpl %a0,%a1
+ jgt 1b
+
+L(mmu_fixup_done):
+
+#ifdef MMU_PRINT
+ mmu_print
+#endif
+
+/*
+ * mmu_engage
+ *
+ * This chunk of code performs the gruesome task of engaging the MMU.
+ * The reason it's gruesome is that when the MMU becomes engaged it
+ * maps logical addresses to physical addresses. The Program Counter
+ * register is then passed through the MMU before the next instruction
+ * is fetched (the instruction following the engage MMU instruction).
+ * This may mean one of two things:
+ * 1. The Program Counter falls within the logical address space of
+ * the kernel of which there are two sub-possibilities:
+ * A. The PC maps to the correct instruction (logical PC == physical
+ * code location), or
+ * B. The PC does not map through and the processor will read some
+ * data (or instruction) which is not the logically next instr.
+ * As you can imagine, A is good and B is bad.
+ * Alternatively,
+ * 2. The Program Counter does not map through the MMU. The processor
+ * will take a Bus Error.
+ * Clearly, 2 is bad.
+ * It doesn't take a whiz kid to figure out that you want 1.A.
+ * This code creates that possibility.
+ * There are two possible 1.A. states (we now ignore the other above states):
+ *	A. The kernel is located at a physical address equal to its
+ *	   logical address, i.e., 0x01000.
+ *	B. The kernel is located somewhere else, e.g., 0x0400.0000.
+ *
+ * Under some conditions the Macintosh can look like A or B.
+ * [A friend and I once noted that Apple hardware engineers should be
+ * whacked twice each day: once when they show up at work (as in, Whack!,
+ * "This is for the screwy hardware we know you're going to design today."),
+ * and also at the end of the day (as in, Whack! "I don't know what
+ * you designed today, but I'm sure it wasn't good."). -- rst]
+ *
+ * This code works on the following premise:
+ * If the kernel start (%d5) is within the first 16 Meg of RAM,
+ * then create a mapping for the kernel at logical 0x8000.0000 to
+ * the physical location of the pc. And, create a transparent
+ * translation register for the first 16 Meg. Then, after the MMU
+ * is engaged, the PC can be moved up into the 0x8000.0000 range
+ * and then the transparent translation can be turned off and then
+ * the PC can jump to the correct logical location and it will be
+ * home (finally). This is essentially the code that the Amiga used
+ * to use. Now, it's generalized for all processors. Which means
+ * that a fresh (but temporary) mapping has to be created. The mapping
+ * is made in page 0 (an as-yet unused location -- except for the
+ * stack!). This temporary mapping will only require 1 pointer table
+ * and a single page table (it can map 256K).
+ *
+ * OK, alternatively, imagine that the Program Counter is not within
+ * the first 16 Meg. Then, just use Transparent Translation registers
+ * to do the right thing.
+ *
+ * Last, if _start is already at 0x01000, then there's nothing special
+ * to do (in other words, in a degenerate case of the first case above,
+ * do nothing).
+ *
+ * Let's do it.
+ *
+ *
+ */
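+
+/*
+ * Editor's summary of what mmu_engage (defined further below) actually does:
+ * it copies the current root table to a scratch copy at memory_start, uses
+ * mmu_temp_map to make this code reachable at both its physical and its
+ * virtual address, points %srp at the scratch copy and enables the MMU,
+ * jumps to the virtual address, switches %srp back to the real kernel_pgdir,
+ * and finally rebases kernel_pgdir_ptr, %fp, %sp and ARG0 from physical to
+ * virtual addresses.
+ */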
+
+ putc 'H'
+
+ mmu_engage
+
+/*
+ * After this point no new memory is allocated and
+ * the start of available memory is stored in availmem.
+ * (The bootmem allocator now requires the physical address.)
+ */
+
+ movel L(memory_start),availmem
+
+#ifdef CONFIG_AMIGA
+ is_not_amiga(1f)
+ /* fixup the Amiga custom register location before printing */
+ clrl L(custom)
+1:
+#endif
+
+#ifdef CONFIG_ATARI
+ is_not_atari(1f)
+ /* fixup the Atari iobase register location before printing */
+ movel #0xff000000,L(iobase)
+1:
+#endif
+
+#ifdef CONFIG_MAC
+ is_not_mac(1f)
+ movel #~VIDEOMEMMASK,%d0
+ andl L(mac_videobase),%d0
+ addl #VIDEOMEMBASE,%d0
+ movel %d0,L(mac_videobase)
+#if defined(CONSOLE)
+ movel %pc@(L(phys_kernel_start)),%d0
+ subl #PAGE_OFFSET,%d0
+ subl %d0,L(console_font)
+ subl %d0,L(console_font_data)
+#endif
+#ifdef MAC_SERIAL_DEBUG
+ orl #0x50000000,L(mac_sccbase)
+#endif
+1:
+#endif
+
+#ifdef CONFIG_HP300
+ is_not_hp300(1f)
+ /*
+ * Fix up the iobase register to point to the new location of the LEDs.
+ */
+ movel #0xf0000000,L(iobase)
+
+ /*
+ * Energise the FPU and caches.
+ */
+ is_040(1f)
+ movel #0x60,0xf05f400c
+ jbra 2f
+
+ /*
+ * 040: slightly different, apparently.
+ */
+1: movew #0,0xf05f400e
+ movew #0x64,0xf05f400e
+2:
+#endif
+
+#ifdef CONFIG_SUN3X
+ is_not_sun3x(1f)
+
+ /* enable copro */
+ oriw #0x4000,0x61000000
+1:
+#endif
+
+#ifdef CONFIG_APOLLO
+ is_not_apollo(1f)
+
+ /*
+ * Fix up the iobase before printing
+ */
+ movel #0x80000000,L(iobase)
+1:
+#endif
+
+ putc 'I'
+ leds 0x10
+
+/*
+ * Enable caches
+ */
+
+ is_not_040_or_060(L(cache_not_680460))
+
+L(cache680460):
+ .chip 68040
+ nop
+ cpusha %bc
+ nop
+
+ is_060(L(cache68060))
+
+ movel #CC6_ENABLE_D+CC6_ENABLE_I,%d0
+ /* MMU stuff works in copyback mode now, so enable the cache */
+ movec %d0,%cacr
+ jra L(cache_done)
+
+L(cache68060):
+ movel #CC6_ENABLE_D+CC6_ENABLE_I+CC6_ENABLE_SB+CC6_PUSH_DPI+CC6_ENABLE_B+CC6_CLRA_B,%d0
+ /* MMU stuff works in copyback mode now, so enable the cache */
+ movec %d0,%cacr
+ /* enable superscalar dispatch in PCR */
+ moveq #1,%d0
+ .chip 68060
+ movec %d0,%pcr
+
+ jbra L(cache_done)
+L(cache_not_680460):
+L(cache68030):
+ .chip 68030
+ movel #CC3_ENABLE_DB+CC3_CLR_D+CC3_ENABLE_D+CC3_ENABLE_IB+CC3_CLR_I+CC3_ENABLE_I,%d0
+ movec %d0,%cacr
+
+ jra L(cache_done)
+ .chip 68k
+L(cache_done):
+
+ putc 'J'
+
+/*
+ * Setup initial stack pointer
+ */
+ lea init_task,%curptr
+ lea init_thread_union+THREAD_SIZE,%sp
+
+ putc 'K'
+
+ subl %a6,%a6 /* clear a6 for gdb */
+
+/*
+ * The new 64bit printf support requires an early exception initialization.
+ */
+ jbsr base_trap_init
+
+/* jump to the kernel start */
+
+ putc '\n'
+ leds 0x55
+
+ jbsr start_kernel
+
+/*
+ * Find a tag record in the bootinfo structure
+ * The bootinfo structure is located right after the kernel bss
+ * Returns: d0: size (-1 if not found)
+ * a0: data pointer (end-of-records if not found)
+ */
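+
+/*
+ * Editor's sketch of the loop below in rough C (illustrative only; the
+ * record layout is the one implied by the BIR_TAG/BIR_SIZE/BIR_DATA
+ * offsets, and "wanted_tag" simply stands for the tag passed in ARG1):
+ *
+ *	struct bi_record { unsigned short tag, size; unsigned long data[0]; };
+ *
+ *	struct bi_record *rec = (struct bi_record *)_end; // end of kernel bss
+ *	while (rec->tag != 0 && rec->tag != wanted_tag)
+ *		rec = (struct bi_record *)((char *)rec + rec->size);
+ *	// found:  d0 = rec->size, a0 = &rec->data
+ *	// tag 0:  d0 = -1, a0 points just past the terminating tag
+ */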
+func_start get_bi_record,%d1
+
+ movel ARG1,%d0
+ lea %pc@(_end),%a0
+1: tstw %a0@(BIR_TAG)
+ jeq 3f
+ cmpw %a0@(BIR_TAG),%d0
+ jeq 2f
+ addw %a0@(BIR_SIZE),%a0
+ jra 1b
+2: moveq #0,%d0
+ movew %a0@(BIR_SIZE),%d0
+ lea %a0@(BIR_DATA),%a0
+ jra 4f
+3: moveq #-1,%d0
+ lea %a0@(BIR_SIZE),%a0
+4:
+func_return get_bi_record
+
+
+/*
+ * MMU Initialization Begins Here
+ *
+ * The structure of the MMU tables on the 68k machines
+ * is thus:
+ * Root Table
+ * Logical addresses are translated through
+ * a hierarchical translation mechanism where the high-order
+ * seven bits of the logical address (LA) are used as an
+ * index into the "root table." Each entry in the root
+ * table has a bit which specifies if it's a valid pointer to a
+ * pointer table. Each entry defines a 32 Meg range of memory.
+ * If an entry is invalid then that logical range of 32M is
+ * invalid and references to that range of memory (when the MMU
+ * is enabled) will fault. If the entry is valid, then it does
+ * one of two things. On 040/060 class machines, it points to
+ * a pointer table which then describes more finely the memory
+ * within that 32M range. On 020/030 class machines, a technique
+ * called "early terminating descriptors" is used. This technique
+ * allows an entire 32Meg to be described by a single entry in the
+ * root table. Thus, this entry in the root table contains the
+ * physical address of the memory or I/O at the logical address
+ * which the entry represents and it also contains the necessary
+ * cache bits for this region.
+ *
+ * Pointer Tables
+ * Per the Root Table, there will be one or more
+ * pointer tables. Each pointer table defines a 32M range.
+ * Not all of the 32M range need be defined. Again, the next
+ * seven bits of the logical address are used as an index into
+ * the pointer table to point to page tables (if the pointer
+ * is valid). There will undoubtedly be more than one
+ * pointer table for the kernel because each pointer table
+ * defines a range of only 32M. Valid pointer table entries
+ * point to page tables, or are early terminating entries
+ * themselves.
+ *
+ * Page Tables
+ * Per the Pointer Tables, each page table entry points
+ * to the physical page in memory that supports the logical
+ * address that translates to the particular index.
+ *
+ * In short, the Logical Address gets translated as follows:
+ *	bits 31..25 - index into the Root Table
+ *	bits 24..18 - index into the Pointer Table
+ * bits 17..12 - index into the Page Table
+ * bits 11..0 - offset into a particular 4K page
+ *
+ * The algorithms which follow do one thing: they abstract
+ * the MMU hardware. For example, there are three kinds of
+ * cache settings that are relevant: either memory is
+ * being mapped, in which case it is either Kernel Code (or
+ * the RamDisk) or MMU data (on the 030, the MMU data
+ * option also describes the kernel); or I/O is being mapped,
+ * in which case it has its own kind of cache bits. There
+ * are constants which abstract these notions from the code that
+ * actually makes the call to map some range of memory.
+ *
+ *
+ *
+ */
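+
+/*
+ * Editor's worked example (illustrative, not part of the original text):
+ * with the 7-bit/7-bit/6-bit split described above and 4K pages, the
+ * logical address 0x8204a000 decomposes as
+ *	root index    =  0x8204a000 >> 25         = 0x41
+ *	pointer index = (0x8204a000 >> 18) & 0x7f = 0x01
+ *	page index    = (0x8204a000 >> 12) & 0x3f = 0x0a
+ *	page offset   =  0x8204a000 & 0xfff       = 0x000
+ * which is exactly the shift-and-mask sequence mmu_map_040 and
+ * mmu_fixup_page_mmu_cache perform with ROOT_INDEX_SHIFT, PTR_INDEX_SHIFT
+ * and PAGE_INDEX_SHIFT further below.
+ */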
+
+#ifdef MMU_PRINT
+/*
+ * mmu_print
+ *
+ * This algorithm will print out the current MMU mappings.
+ *
+ * Input:
+ * %a5 points to the root table. Everything else is calculated
+ * from this.
+ */
+
+#define mmu_next_valid 0
+#define mmu_start_logical 4
+#define mmu_next_logical 8
+#define mmu_start_physical 12
+#define mmu_next_physical 16
+
+#define MMU_PRINT_INVALID -1
+#define MMU_PRINT_VALID 1
+#define MMU_PRINT_UNINITED 0
+
+#define putZc(z,n) jbne 1f; putc z; jbra 2f; 1: putc n; 2:
+
+func_start mmu_print,%a0-%a6/%d0-%d7
+
+ movel %pc@(L(kernel_pgdir_ptr)),%a5
+ lea %pc@(L(mmu_print_data)),%a0
+ movel #MMU_PRINT_UNINITED,%a0@(mmu_next_valid)
+
+ is_not_040_or_060(mmu_030_print)
+
+mmu_040_print:
+ puts "\nMMU040\n"
+ puts "rp:"
+ putn %a5
+ putc '\n'
+#if 0
+ /*
+ * The following #if/#endif block is a tight algorithm for dumping the 040
+ * MMU Map in gory detail. It really isn't that practical unless the
+ * MMU Map algorithm appears to go awry and you need to debug it at the
+ * entry-by-entry level.
+ */
+ movel #ROOT_TABLE_SIZE,%d5
+#if 0
+ movel %a5@+,%d7 | Burn an entry to skip the kernel mappings,
+ subql #1,%d5 | they (might) work
+#endif
+1: tstl %d5
+ jbeq mmu_print_done
+ subq #1,%d5
+ movel %a5@+,%d7
+ btst #1,%d7
+ jbeq 1b
+
+2: putn %d7
+ andil #0xFFFFFE00,%d7
+ movel %d7,%a4
+ movel #PTR_TABLE_SIZE,%d4
+ putc ' '
+3: tstl %d4
+ jbeq 11f
+ subq #1,%d4
+ movel %a4@+,%d7
+ btst #1,%d7
+ jbeq 3b
+
+4: putn %d7
+ andil #0xFFFFFF00,%d7
+ movel %d7,%a3
+ movel #PAGE_TABLE_SIZE,%d3
+5: movel #8,%d2
+6: tstl %d3
+ jbeq 31f
+ subq #1,%d3
+ movel %a3@+,%d6
+ btst #0,%d6
+ jbeq 6b
+7: tstl %d2
+ jbeq 8f
+ subq #1,%d2
+ putc ' '
+ jbra 91f
+8: putc '\n'
+ movel #8+1+8+1+1,%d2
+9: putc ' '
+ dbra %d2,9b
+ movel #7,%d2
+91: putn %d6
+ jbra 6b
+
+31: putc '\n'
+ movel #8+1,%d2
+32: putc ' '
+ dbra %d2,32b
+ jbra 3b
+
+11: putc '\n'
+ jbra 1b
+#endif /* MMU 040 Dumping code that's gory and detailed */
+
+ lea %pc@(kernel_pg_dir),%a5
+ movel %a5,%a0 /* a0 has the address of the root table ptr */
+ movel #0x00000000,%a4 /* logical address */
+ moveql #0,%d0
+40:
+ /* Increment the logical address and preserve in d5 */
+ movel %a4,%d5
+ addil #PAGESIZE<<13,%d5
+ movel %a0@+,%d6
+ btst #1,%d6
+ jbne 41f
+ jbsr mmu_print_tuple_invalidate
+ jbra 48f
+41:
+ movel #0,%d1
+ andil #0xfffffe00,%d6
+ movel %d6,%a1
+42:
+ movel %a4,%d5
+ addil #PAGESIZE<<6,%d5
+ movel %a1@+,%d6
+ btst #1,%d6
+ jbne 43f
+ jbsr mmu_print_tuple_invalidate
+ jbra 47f
+43:
+ movel #0,%d2
+ andil #0xffffff00,%d6
+ movel %d6,%a2
+44:
+ movel %a4,%d5
+ addil #PAGESIZE,%d5
+ movel %a2@+,%d6
+ btst #0,%d6
+ jbne 45f
+ jbsr mmu_print_tuple_invalidate
+ jbra 46f
+45:
+ moveml %d0-%d1,%sp@-
+ movel %a4,%d0
+ movel %d6,%d1
+ andil #0xfffff4e0,%d1
+ lea %pc@(mmu_040_print_flags),%a6
+ jbsr mmu_print_tuple
+ moveml %sp@+,%d0-%d1
+46:
+ movel %d5,%a4
+ addq #1,%d2
+ cmpib #64,%d2
+ jbne 44b
+47:
+ movel %d5,%a4
+ addq #1,%d1
+ cmpib #128,%d1
+ jbne 42b
+48:
+ movel %d5,%a4 /* move to the next logical address */
+ addq #1,%d0
+ cmpib #128,%d0
+ jbne 40b
+
+ .chip 68040
+ movec %dtt1,%d0
+ movel %d0,%d1
+ andiw #0x8000,%d1 /* is it valid ? */
+ jbeq 1f /* No, bail out */
+
+ movel %d0,%d1
+ andil #0xff000000,%d1 /* Get the address */
+ putn %d1
+ puts "=="
+ putn %d1
+
+ movel %d0,%d6
+ jbsr mmu_040_print_flags_tt
+1:
+ movec %dtt0,%d0
+ movel %d0,%d1
+ andiw #0x8000,%d1 /* is it valid ? */
+ jbeq 1f /* No, bail out */
+
+ movel %d0,%d1
+ andil #0xff000000,%d1 /* Get the address */
+ putn %d1
+ puts "=="
+ putn %d1
+
+ movel %d0,%d6
+ jbsr mmu_040_print_flags_tt
+1:
+ .chip 68k
+
+ jbra mmu_print_done
+
+mmu_040_print_flags:
+ btstl #10,%d6
+ putZc(' ','G') /* global bit */
+ btstl #7,%d6
+ putZc(' ','S') /* supervisor bit */
+mmu_040_print_flags_tt:
+ btstl #6,%d6
+ jbne 3f
+ putc 'C'
+ btstl #5,%d6
+ putZc('w','c') /* write through or copy-back */
+ jbra 4f
+3:
+ putc 'N'
+ btstl #5,%d6
+ putZc('s',' ') /* serialized non-cacheable, or non-cacheable */
+4:
+ rts
+
+mmu_030_print_flags:
+ btstl #6,%d6
+ putZc('C','I') /* write through or copy-back */
+ rts
+
+mmu_030_print:
+ puts "\nMMU030\n"
+ puts "\nrp:"
+ putn %a5
+ putc '\n'
+ movel %a5,%d0
+ andil #0xfffffff0,%d0
+ movel %d0,%a0
+ movel #0x00000000,%a4 /* logical address */
+ movel #0,%d0
+30:
+ movel %a4,%d5
+ addil #PAGESIZE<<13,%d5
+ movel %a0@+,%d6
+ btst #1,%d6 /* is it a table ptr? */
+ jbne 31f /* yes */
+ btst #0,%d6 /* is it early terminating? */
+ jbeq 1f /* no */
+ jbsr mmu_030_print_helper
+ jbra 38f
+1:
+ jbsr mmu_print_tuple_invalidate
+ jbra 38f
+31:
+ movel #0,%d1
+ andil #0xfffffff0,%d6
+ movel %d6,%a1
+32:
+ movel %a4,%d5
+ addil #PAGESIZE<<6,%d5
+ movel %a1@+,%d6
+ btst #1,%d6 /* is it a table ptr? */
+ jbne 33f /* yes */
+ btst #0,%d6 /* is it a page descriptor? */
+ jbeq 1f /* no */
+ jbsr mmu_030_print_helper
+ jbra 37f
+1:
+ jbsr mmu_print_tuple_invalidate
+ jbra 37f
+33:
+ movel #0,%d2
+ andil #0xfffffff0,%d6
+ movel %d6,%a2
+34:
+ movel %a4,%d5
+ addil #PAGESIZE,%d5
+ movel %a2@+,%d6
+ btst #0,%d6
+ jbne 35f
+ jbsr mmu_print_tuple_invalidate
+ jbra 36f
+35:
+ jbsr mmu_030_print_helper
+36:
+ movel %d5,%a4
+ addq #1,%d2
+ cmpib #64,%d2
+ jbne 34b
+37:
+ movel %d5,%a4
+ addq #1,%d1
+ cmpib #128,%d1
+ jbne 32b
+38:
+ movel %d5,%a4 /* move to the next logical address */
+ addq #1,%d0
+ cmpib #128,%d0
+ jbne 30b
+
+mmu_print_done:
+ puts "\n\n"
+
+func_return mmu_print
+
+
+mmu_030_print_helper:
+ moveml %d0-%d1,%sp@-
+ movel %a4,%d0
+ movel %d6,%d1
+ lea %pc@(mmu_030_print_flags),%a6
+ jbsr mmu_print_tuple
+ moveml %sp@+,%d0-%d1
+ rts
+
+mmu_print_tuple_invalidate:
+ moveml %a0/%d7,%sp@-
+
+ lea %pc@(L(mmu_print_data)),%a0
+ tstl %a0@(mmu_next_valid)
+ jbmi mmu_print_tuple_invalidate_exit
+
+ movel #MMU_PRINT_INVALID,%a0@(mmu_next_valid)
+
+ putn %a4
+
+ puts "##\n"
+
+mmu_print_tuple_invalidate_exit:
+ moveml %sp@+,%a0/%d7
+ rts
+
+
+mmu_print_tuple:
+ moveml %d0-%d7/%a0,%sp@-
+
+ lea %pc@(L(mmu_print_data)),%a0
+
+ tstl %a0@(mmu_next_valid)
+ jble mmu_print_tuple_print
+
+ cmpl %a0@(mmu_next_physical),%d1
+ jbeq mmu_print_tuple_increment
+
+mmu_print_tuple_print:
+ putn %d0
+ puts "->"
+ putn %d1
+
+ movel %d1,%d6
+ jbsr %a6@
+
+mmu_print_tuple_record:
+ movel #MMU_PRINT_VALID,%a0@(mmu_next_valid)
+
+ movel %d1,%a0@(mmu_next_physical)
+
+mmu_print_tuple_increment:
+ movel %d5,%d7
+ subl %a4,%d7
+ addl %d7,%a0@(mmu_next_physical)
+
+mmu_print_tuple_exit:
+ moveml %sp@+,%d0-%d7/%a0
+ rts
+
+mmu_print_machine_cpu_types:
+ puts "machine: "
+
+ is_not_amiga(1f)
+ puts "amiga"
+ jbra 9f
+1:
+ is_not_atari(2f)
+ puts "atari"
+ jbra 9f
+2:
+ is_not_mac(3f)
+ puts "macintosh"
+ jbra 9f
+3: puts "unknown"
+9: putc '\n'
+
+ puts "cputype: 0"
+ is_not_060(1f)
+ putc '6'
+ jbra 9f
+1:
+ is_not_040_or_060(2f)
+ putc '4'
+ jbra 9f
+2: putc '3'
+9: putc '0'
+ putc '\n'
+
+ rts
+#endif /* MMU_PRINT */
+
+/*
+ * mmu_map_tt
+ *
+ * This is a specific function which works on all 680x0 machines.
+ * On 030, 040 & 060 it will attempt to use Transparent Translation
+ * registers (tt1).
+ * On 020 it will call the standard mmu_map which will use early
+ * terminating descriptors.
+ */
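+/*
+ * Editor's note (inferred from the call sites above): ARG1 selects which
+ * transparent translation register to use (0 or 1), ARG2 is the base
+ * address, ARG3 the block size and ARG4 the cache mode bits. As an
+ * illustrative example, the Q40 call mmu_map_tt #1,#0xff000000,#0x01000000,...
+ * works out as
+ *	bfffo 0x01000000          -> bit offset 7 (offsets >= 8, i.e. blocks
+ *	                             smaller than 16MB, fall back to mmu_map)
+ *	mask = -1 >> 7 >> 1       =  0x00ffffff
+ *	base = 0xff000000 & ~mask =  0xff000000
+ *	base | (mask >> 8), low word cleared = 0xff000000
+ * i.e. a logical base byte of 0xff and an address mask byte of 0x00, so only
+ * the top 8 address bits are compared -- a 16MB transparent window.
+ */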
+func_start mmu_map_tt,%d0/%d1/%a0,4
+
+ dputs "mmu_map_tt:"
+ dputn ARG1
+ dputn ARG2
+ dputn ARG3
+ dputn ARG4
+ dputc '\n'
+
+ is_020(L(do_map))
+
+ /* Extract the highest bit set
+ */
+ bfffo ARG3{#0,#32},%d1
+ cmpw #8,%d1
+ jcc L(do_map)
+
+ /* And get the mask
+ */
+ moveq #-1,%d0
+ lsrl %d1,%d0
+ lsrl #1,%d0
+
+ /* Mask the address
+ */
+ movel %d0,%d1
+ notl %d1
+ andl ARG2,%d1
+
+ /* Generate the upper 16bit of the tt register
+ */
+ lsrl #8,%d0
+ orl %d0,%d1
+ clrw %d1
+
+ is_040_or_060(L(mmu_map_tt_040))
+
+ /* set 030 specific bits (read/write access for supervisor mode
+ * (highest function code set, lower two bits masked))
+ */
+ orw #TTR_ENABLE+TTR_RWM+TTR_FCB2+TTR_FCM1+TTR_FCM0,%d1
+ movel ARG4,%d0
+ btst #6,%d0
+ jeq 1f
+ orw #TTR_CI,%d1
+
+1: lea STACK,%a0
+ dputn %d1
+ movel %d1,%a0@
+ .chip 68030
+ tstl ARG1
+ jne 1f
+ pmove %a0@,%tt0
+ jra 2f
+1: pmove %a0@,%tt1
+2: .chip 68k
+ jra L(mmu_map_tt_done)
+
+ /* set 040 specific bits
+ */
+L(mmu_map_tt_040):
+ orw #TTR_ENABLE+TTR_KERNELMODE,%d1
+ orl ARG4,%d1
+ dputn %d1
+
+ .chip 68040
+ tstl ARG1
+ jne 1f
+ movec %d1,%itt0
+ movec %d1,%dtt0
+ jra 2f
+1: movec %d1,%itt1
+ movec %d1,%dtt1
+2: .chip 68k
+
+ jra L(mmu_map_tt_done)
+
+L(do_map):
+ mmu_map_eq ARG2,ARG3,ARG4
+
+L(mmu_map_tt_done):
+
+func_return mmu_map_tt
+
+/*
+ * mmu_map
+ *
+ * This routine will map a range of memory using a pointer
+ * table and allocating the pages on the fly from the kernel.
+ * The pointer table does not have to be already linked into
+ * the root table, this routine will do that if necessary.
+ *
+ * NOTE
+ * This routine will assert failure and use the serial_putc
+ * routines in the case of a run-time error. For example,
+ * if the address is already mapped.
+ *
+ * NOTE-2
+ * This routine will use early terminating descriptors
+ * where possible for the 68020+68851 and 68030 type
+ * processors.
+ */
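+/*
+ * Editor's note (inferred from the call sites and the code below):
+ *	ARG1 = logical start address
+ *	ARG2 = physical start address
+ *	ARG3 = length of the range in bytes
+ *	ARG4 = cache mode bits OR'd into every descriptor
+ */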
+func_start mmu_map,%d0-%d4/%a0-%a4
+
+ dputs "\nmmu_map:"
+ dputn ARG1
+ dputn ARG2
+ dputn ARG3
+ dputn ARG4
+ dputc '\n'
+
+ /* Get logical address and round it down to 256KB
+ */
+ movel ARG1,%d0
+ andl #-(PAGESIZE*PAGE_TABLE_SIZE),%d0
+ movel %d0,%a3
+
+ /* Get the end address
+ */
+ movel ARG1,%a4
+ addl ARG3,%a4
+ subql #1,%a4
+
+ /* Get physical address and round it down to 256KB
+ */
+ movel ARG2,%d0
+ andl #-(PAGESIZE*PAGE_TABLE_SIZE),%d0
+ movel %d0,%a2
+
+ /* Add page attributes to the physical address
+ */
+ movel ARG4,%d0
+ orw #_PAGE_PRESENT+_PAGE_ACCESSED+_PAGE_DIRTY,%d0
+ addw %d0,%a2
+
+ dputn %a2
+ dputn %a3
+ dputn %a4
+
+ is_not_040_or_060(L(mmu_map_030))
+
+ addw #_PAGE_GLOBAL040,%a2
+/*
+ * MMU 040 & 060 Support
+ *
+ * The MMU usage for the 040 and 060 is different enough from
+ * the 030 and 68851 that there is separate code. This comment
+ * block describes the data structures and algorithms built by
+ * this code.
+ *
+ * The 040 does not support early terminating descriptors, as
+ * the 030 does. Therefore, a third level of table is needed
+ * for the 040, and that would be the page table. In Linux,
+ * page tables are allocated directly from the memory above the
+ * kernel.
+ *
+ */
+
+L(mmu_map_040):
+ /* Calculate the offset into the root table
+ */
+ movel %a3,%d0
+ moveq #ROOT_INDEX_SHIFT,%d1
+ lsrl %d1,%d0
+ mmu_get_root_table_entry %d0
+
+ /* Calculate the offset into the pointer table
+ */
+ movel %a3,%d0
+ moveq #PTR_INDEX_SHIFT,%d1
+ lsrl %d1,%d0
+ andl #PTR_TABLE_SIZE-1,%d0
+ mmu_get_ptr_table_entry %a0,%d0
+
+ /* Calculate the offset into the page table
+ */
+ movel %a3,%d0
+ moveq #PAGE_INDEX_SHIFT,%d1
+ lsrl %d1,%d0
+ andl #PAGE_TABLE_SIZE-1,%d0
+ mmu_get_page_table_entry %a0,%d0
+
+	/* The page table entry must not be busy
+ */
+ tstl %a0@
+ jne L(mmu_map_error)
+
+ /* Do the mapping and advance the pointers
+ */
+ movel %a2,%a0@
+2:
+ addw #PAGESIZE,%a2
+ addw #PAGESIZE,%a3
+
+ /* Ready with mapping?
+ */
+ lea %a3@(-1),%a0
+ cmpl %a0,%a4
+ jhi L(mmu_map_040)
+ jra L(mmu_map_done)
+
+L(mmu_map_030):
+ /* Calculate the offset into the root table
+ */
+ movel %a3,%d0
+ moveq #ROOT_INDEX_SHIFT,%d1
+ lsrl %d1,%d0
+ mmu_get_root_table_entry %d0
+
+	/* Check if the logical address is 32MB aligned,
+	 * so we can try to map it in one go
+ */
+ movel %a3,%d0
+ andl #(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE-1)&(-ROOT_TABLE_SIZE),%d0
+ jne 1f
+
+	/* Is there enough left to map 32MB at once
+ */
+ lea %a3@(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE-1),%a1
+ cmpl %a1,%a4
+ jcs 1f
+
+ addql #1,%a1
+
+	/* The root table entry must not be busy
+ */
+ tstl %a0@
+ jne L(mmu_map_error)
+
+ /* Do the mapping and advance the pointers
+ */
+ dputs "early term1"
+ dputn %a2
+ dputn %a3
+ dputn %a1
+ dputc '\n'
+ movel %a2,%a0@
+
+ movel %a1,%a3
+ lea %a2@(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE),%a2
+ jra L(mmu_mapnext_030)
+1:
+ /* Calculate the offset into the pointer table
+ */
+ movel %a3,%d0
+ moveq #PTR_INDEX_SHIFT,%d1
+ lsrl %d1,%d0
+ andl #PTR_TABLE_SIZE-1,%d0
+ mmu_get_ptr_table_entry %a0,%d0
+
+	/* The pointer table entry must not be busy
+ */
+ tstl %a0@
+ jne L(mmu_map_error)
+
+ /* Do the mapping and advance the pointers
+ */
+ dputs "early term2"
+ dputn %a2
+ dputn %a3
+ dputc '\n'
+ movel %a2,%a0@
+
+ addl #PAGE_TABLE_SIZE*PAGESIZE,%a2
+ addl #PAGE_TABLE_SIZE*PAGESIZE,%a3
+
+L(mmu_mapnext_030):
+ /* Ready with mapping?
+ */
+ lea %a3@(-1),%a0
+ cmpl %a0,%a4
+ jhi L(mmu_map_030)
+ jra L(mmu_map_done)
+
+L(mmu_map_error):
+
+ dputs "mmu_map error:"
+ dputn %a2
+ dputn %a3
+ dputc '\n'
+
+L(mmu_map_done):
+
+func_return mmu_map
+
+/*
+ * mmu_fixup
+ *
+ * On the 040 class machines, all pages that are used for the
+ * mmu have to be fixed up.
+ */
+
+func_start mmu_fixup_page_mmu_cache,%d0/%a0
+
+ dputs "mmu_fixup_page_mmu_cache"
+ dputn ARG1
+
+ /* Calculate the offset into the root table
+ */
+ movel ARG1,%d0
+ moveq #ROOT_INDEX_SHIFT,%d1
+ lsrl %d1,%d0
+ mmu_get_root_table_entry %d0
+
+ /* Calculate the offset into the pointer table
+ */
+ movel ARG1,%d0
+ moveq #PTR_INDEX_SHIFT,%d1
+ lsrl %d1,%d0
+ andl #PTR_TABLE_SIZE-1,%d0
+ mmu_get_ptr_table_entry %a0,%d0
+
+ /* Calculate the offset into the page table
+ */
+ movel ARG1,%d0
+ moveq #PAGE_INDEX_SHIFT,%d1
+ lsrl %d1,%d0
+ andl #PAGE_TABLE_SIZE-1,%d0
+ mmu_get_page_table_entry %a0,%d0
+
+ movel %a0@,%d0
+ andil #_CACHEMASK040,%d0
+ orl %pc@(m68k_pgtable_cachemode),%d0
+ movel %d0,%a0@
+
+ dputc '\n'
+
+func_return mmu_fixup_page_mmu_cache
+
+/*
+ * mmu_temp_map
+ *
+ * create a temporary mapping to enable the mmu,
+ * this way we don't need any transparent translation tricks.
+ */
+
+func_start mmu_temp_map,%d0/%d1/%a0/%a1
+
+ dputs "mmu_temp_map"
+ dputn ARG1
+ dputn ARG2
+ dputc '\n'
+
+ lea %pc@(L(temp_mmap_mem)),%a1
+
+ /* Calculate the offset in the root table
+ */
+ movel ARG2,%d0
+ moveq #ROOT_INDEX_SHIFT,%d1
+ lsrl %d1,%d0
+ mmu_get_root_table_entry %d0
+
+	/* Check if the table was temporarily allocated, in which case we reuse it
+ */
+ movel %a0@,%d0
+ cmpl %pc@(L(memory_start)),%d0
+ jcc 1f
+
+	/* Temporarily allocate a ptr table and insert it into the root table
+ */
+ movel %a1@,%d0
+ addl #PTR_TABLE_SIZE*4,%a1@
+ orw #_PAGE_TABLE+_PAGE_ACCESSED,%d0
+ movel %d0,%a0@
+ dputs " (new)"
+1:
+ dputn %d0
+ /* Mask the root table entry for the ptr table
+ */
+ andw #-ROOT_TABLE_SIZE,%d0
+ movel %d0,%a0
+
+ /* Calculate the offset into the pointer table
+ */
+ movel ARG2,%d0
+ moveq #PTR_INDEX_SHIFT,%d1
+ lsrl %d1,%d0
+ andl #PTR_TABLE_SIZE-1,%d0
+ lea %a0@(%d0*4),%a0
+ dputn %a0
+
+ /* Check if a temporary page table is already allocated
+ */
+ movel %a0@,%d0
+ jne 1f
+
+	/* Temporarily allocate a page table and insert it into the ptr table
+ */
+ movel %a1@,%d0
+ /* The 512 should be PAGE_TABLE_SIZE*4, but that violates the
+ alignment restriction for pointer tables on the '0[46]0. */
+ addl #512,%a1@
+ orw #_PAGE_TABLE+_PAGE_ACCESSED,%d0
+ movel %d0,%a0@
+ dputs " (new)"
+1:
+ dputn %d0
+ /* Mask the ptr table entry for the page table
+ */
+ andw #-PTR_TABLE_SIZE,%d0
+ movel %d0,%a0
+
+ /* Calculate the offset into the page table
+ */
+ movel ARG2,%d0
+ moveq #PAGE_INDEX_SHIFT,%d1
+ lsrl %d1,%d0
+ andl #PAGE_TABLE_SIZE-1,%d0
+ lea %a0@(%d0*4),%a0
+ dputn %a0
+
+ /* Insert the address into the page table
+ */
+ movel ARG1,%d0
+ andw #-PAGESIZE,%d0
+ orw #_PAGE_PRESENT+_PAGE_ACCESSED+_PAGE_DIRTY,%d0
+ movel %d0,%a0@
+ dputn %d0
+
+ dputc '\n'
+
+func_return mmu_temp_map
+
+func_start mmu_engage,%d0-%d2/%a0-%a3
+
+ moveq #ROOT_TABLE_SIZE-1,%d0
+ /* Temporarily use a different root table. */
+ lea %pc@(L(kernel_pgdir_ptr)),%a0
+ movel %a0@,%a2
+ movel %pc@(L(memory_start)),%a1
+ movel %a1,%a0@
+ movel %a2,%a0
+1:
+ movel %a0@+,%a1@+
+ dbra %d0,1b
+
+ lea %pc@(L(temp_mmap_mem)),%a0
+ movel %a1,%a0@
+
+ movew #PAGESIZE-1,%d0
+1:
+ clrl %a1@+
+ dbra %d0,1b
+
+ lea %pc@(1b),%a0
+ movel #1b,%a1
+ /* Skip temp mappings if phys == virt */
+ cmpl %a0,%a1
+ jeq 1f
+
+ mmu_temp_map %a0,%a0
+ mmu_temp_map %a0,%a1
+
+ addw #PAGESIZE,%a0
+ addw #PAGESIZE,%a1
+ mmu_temp_map %a0,%a0
+ mmu_temp_map %a0,%a1
+1:
+ movel %pc@(L(memory_start)),%a3
+ movel %pc@(L(phys_kernel_start)),%d2
+
+ is_not_040_or_060(L(mmu_engage_030))
+
+L(mmu_engage_040):
+ .chip 68040
+ nop
+ cinva %bc
+ nop
+ pflusha
+ nop
+ movec %a3,%srp
+ movel #TC_ENABLE+TC_PAGE4K,%d0
+ movec %d0,%tc /* enable the MMU */
+ jmp 1f:l
+1: nop
+ movec %a2,%srp
+ nop
+ cinva %bc
+ nop
+ pflusha
+ .chip 68k
+ jra L(mmu_engage_cleanup)
+
+L(mmu_engage_030_temp):
+ .space 12
+L(mmu_engage_030):
+ .chip 68030
+ lea %pc@(L(mmu_engage_030_temp)),%a0
+ movel #0x80000002,%a0@
+ movel %a3,%a0@(4)
+ movel #0x0808,%d0
+ movec %d0,%cacr
+ pmove %a0@,%srp
+ pflusha
+ /*
+ * enable,super root enable,4096 byte pages,7 bit root index,
+ * 7 bit pointer index, 6 bit page table index.
+ */
+ movel #0x82c07760,%a0@(8)
+ pmove %a0@(8),%tc /* enable the MMU */
+ jmp 1f:l
+1: movel %a2,%a0@(4)
+ movel #0x0808,%d0
+ movec %d0,%cacr
+ pmove %a0@,%srp
+ pflusha
+ .chip 68k
+
+L(mmu_engage_cleanup):
+ subl #PAGE_OFFSET,%d2
+ subl %d2,%a2
+ movel %a2,L(kernel_pgdir_ptr)
+ subl %d2,%fp
+ subl %d2,%sp
+ subl %d2,ARG0
+
+func_return mmu_engage
+
+func_start mmu_get_root_table_entry,%d0/%a1
+
+#if 0
+ dputs "mmu_get_root_table_entry:"
+ dputn ARG1
+ dputs " ="
+#endif
+
+ movel %pc@(L(kernel_pgdir_ptr)),%a0
+ tstl %a0
+ jne 2f
+
+ dputs "\nmmu_init:"
+
+ /* Find the start of free memory, get_bi_record does this for us,
+ * as the bootinfo structure is located directly behind the kernel
+	 * and we simply search for the last entry.
+ */
+ get_bi_record BI_LAST
+ addw #PAGESIZE-1,%a0
+ movel %a0,%d0
+ andw #-PAGESIZE,%d0
+
+ dputn %d0
+
+ lea %pc@(L(memory_start)),%a0
+ movel %d0,%a0@
+ lea %pc@(L(kernel_end)),%a0
+ movel %d0,%a0@
+
+ /* we have to return the first page at _stext since the init code
+ * in mm/init.c simply expects kernel_pg_dir there, the rest of
+	 * the page is used for further ptr tables in get_ptr_table.
+ */
+ lea %pc@(_stext),%a0
+ lea %pc@(L(mmu_cached_pointer_tables)),%a1
+ movel %a0,%a1@
+ addl #ROOT_TABLE_SIZE*4,%a1@
+
+ lea %pc@(L(mmu_num_pointer_tables)),%a1
+ addql #1,%a1@
+
+ /* clear the page
+ */
+ movel %a0,%a1
+ movew #PAGESIZE/4-1,%d0
+1:
+ clrl %a1@+
+ dbra %d0,1b
+
+ lea %pc@(L(kernel_pgdir_ptr)),%a1
+ movel %a0,%a1@
+
+ dputn %a0
+ dputc '\n'
+2:
+ movel ARG1,%d0
+ lea %a0@(%d0*4),%a0
+
+#if 0
+ dputn %a0
+ dputc '\n'
+#endif
+
+func_return mmu_get_root_table_entry
+
+
+
+func_start mmu_get_ptr_table_entry,%d0/%a1
+
+#if 0
+ dputs "mmu_get_ptr_table_entry:"
+ dputn ARG1
+ dputn ARG2
+ dputs " ="
+#endif
+
+ movel ARG1,%a0
+ movel %a0@,%d0
+ jne 2f
+
+ /* Keep track of the number of pointer tables we use
+ */
+ dputs "\nmmu_get_new_ptr_table:"
+ lea %pc@(L(mmu_num_pointer_tables)),%a0
+ movel %a0@,%d0
+ addql #1,%a0@
+
+ /* See if there is a free pointer table in our cache of pointer tables
+ */
+ lea %pc@(L(mmu_cached_pointer_tables)),%a1
+ andw #7,%d0
+ jne 1f
+
+ /* Get a new pointer table page from above the kernel memory
+ */
+ get_new_page
+ movel %a0,%a1@
+1:
+ /* There is an unused pointer table in our cache... use it
+ */
+ movel %a1@,%d0
+ addl #PTR_TABLE_SIZE*4,%a1@
+
+ dputn %d0
+ dputc '\n'
+
+ /* Insert the new pointer table into the root table
+ */
+ movel ARG1,%a0
+ orw #_PAGE_TABLE+_PAGE_ACCESSED,%d0
+ movel %d0,%a0@
+2:
+ /* Extract the pointer table entry
+ */
+ andw #-PTR_TABLE_SIZE,%d0
+ movel %d0,%a0
+ movel ARG2,%d0
+ lea %a0@(%d0*4),%a0
+
+#if 0
+ dputn %a0
+ dputc '\n'
+#endif
+
+func_return mmu_get_ptr_table_entry
+
+
+func_start mmu_get_page_table_entry,%d0/%a1
+
+#if 0
+ dputs "mmu_get_page_table_entry:"
+ dputn ARG1
+ dputn ARG2
+ dputs " ="
+#endif
+
+ movel ARG1,%a0
+ movel %a0@,%d0
+ jne 2f
+
+	/* If the page table entry doesn't exist, we allocate a completely new
+	 * page and use it as one contiguous big page table, which can cover
+	 * 4MB of memory; nearly all mappings have that alignment.
+ */
+ get_new_page
+ addw #_PAGE_TABLE+_PAGE_ACCESSED,%a0
+
+ /* align pointer table entry for a page of page tables
+ */
+ movel ARG1,%d0
+ andw #-(PAGESIZE/PAGE_TABLE_SIZE),%d0
+ movel %d0,%a1
+
+ /* Insert the page tables into the pointer entries
+ */
+ moveq #PAGESIZE/PAGE_TABLE_SIZE/4-1,%d0
+1:
+ movel %a0,%a1@+
+ lea %a0@(PAGE_TABLE_SIZE*4),%a0
+ dbra %d0,1b
+
+ /* Now we can get the initialized pointer table entry
+ */
+ movel ARG1,%a0
+ movel %a0@,%d0
+2:
+ /* Extract the page table entry
+ */
+ andw #-PAGE_TABLE_SIZE,%d0
+ movel %d0,%a0
+ movel ARG2,%d0
+ lea %a0@(%d0*4),%a0
+
+#if 0
+ dputn %a0
+ dputc '\n'
+#endif
+
+func_return mmu_get_page_table_entry
+
+/*
+ * get_new_page
+ *
+ * Return a new page from the memory start and clear it.
+ */
+func_start get_new_page,%d0/%a1
+
+ dputs "\nget_new_page:"
+
+ /* allocate the page and adjust memory_start
+ */
+ lea %pc@(L(memory_start)),%a0
+ movel %a0@,%a1
+ addl #PAGESIZE,%a0@
+
+ /* clear the new page
+ */
+ movel %a1,%a0
+ movew #PAGESIZE/4-1,%d0
+1:
+ clrl %a1@+
+ dbra %d0,1b
+
+ dputn %a0
+ dputc '\n'
+
+func_return get_new_page
+
+
+
+/*
+ * Debug output support
+ * Atarians have a choice between the parallel port, the serial port
+ * from the MFP, or a serial port on the SCC
+ */
+
+#ifdef CONFIG_MAC
+
+L(scc_initable_mac):
+ .byte 9,12 /* Reset */
+ .byte 4,0x44 /* x16, 1 stopbit, no parity */
+ .byte 3,0xc0 /* receiver: 8 bpc */
+ .byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */
+ .byte 9,0 /* no interrupts */
+ .byte 10,0 /* NRZ */
+ .byte 11,0x50 /* use baud rate generator */
+ .byte 12,10,13,0 /* 9600 baud */
+ .byte 14,1 /* Baud rate generator enable */
+ .byte 3,0xc1 /* enable receiver */
+ .byte 5,0xea /* enable transmitter */
+ .byte -1
+ .even
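+
+/*
+ * Editor's note: the table above is a sequence of (register, value) byte
+ * pairs that serial_init below writes, one byte at a time, to the SCC
+ * control port; a single -1 byte terminates the list.
+ */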
+#endif
+
+#ifdef CONFIG_ATARI
+/* #define USE_PRINTER */
+/* #define USE_SCC_B */
+/* #define USE_SCC_A */
+#define USE_MFP
+
+#if defined(USE_SCC_A) || defined(USE_SCC_B)
+#define USE_SCC
+/* Initialisation table for SCC */
+L(scc_initable):
+ .byte 9,12 /* Reset */
+ .byte 4,0x44 /* x16, 1 stopbit, no parity */
+ .byte 3,0xc0 /* receiver: 8 bpc */
+ .byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */
+ .byte 9,0 /* no interrupts */
+ .byte 10,0 /* NRZ */
+ .byte 11,0x50 /* use baud rate generator */
+ .byte 12,24,13,0 /* 9600 baud */
+ .byte 14,2,14,3 /* use master clock for BRG, enable */
+ .byte 3,0xc1 /* enable receiver */
+ .byte 5,0xea /* enable transmitter */
+ .byte -1
+ .even
+#endif
+
+#ifdef USE_PRINTER
+
+LPSG_SELECT = 0xff8800
+LPSG_READ = 0xff8800
+LPSG_WRITE = 0xff8802
+LPSG_IO_A = 14
+LPSG_IO_B = 15
+LPSG_CONTROL = 7
+LSTMFP_GPIP = 0xfffa01
+LSTMFP_DDR = 0xfffa05
+LSTMFP_IERB = 0xfffa09
+
+#elif defined(USE_SCC_B)
+
+LSCC_CTRL = 0xff8c85
+LSCC_DATA = 0xff8c87
+
+#elif defined(USE_SCC_A)
+
+LSCC_CTRL = 0xff8c81
+LSCC_DATA = 0xff8c83
+
+#elif defined(USE_MFP)
+
+LMFP_UCR = 0xfffa29
+LMFP_TDCDR = 0xfffa1d
+LMFP_TDDR = 0xfffa25
+LMFP_TSR = 0xfffa2d
+LMFP_UDR = 0xfffa2f
+
+#endif
+#endif /* CONFIG_ATARI */
+
+/*
+ * Serial port output support.
+ */
+
+/*
+ * Initialize serial port hardware for 9600/8/1
+ */
+func_start serial_init,%d0/%d1/%a0/%a1
+ /*
+ * Some of the register usage that follows
+ * CONFIG_AMIGA
+ * a0 = pointer to boot info record
+ * d0 = boot info offset
+ * CONFIG_ATARI
+ * a0 = address of SCC
+ * a1 = Liobase address/address of scc_initable
+ * d0 = init data for serial port
+ * CONFIG_MAC
+ * a0 = address of SCC
+ * a1 = address of scc_initable_mac
+ * d0 = init data for serial port
+ */
+
+#ifdef CONFIG_AMIGA
+#define SERIAL_DTR 7
+#define SERIAL_CNTRL CIABBASE+C_PRA
+
+ is_not_amiga(1f)
+ lea %pc@(L(custom)),%a0
+ movel #-ZTWOBASE,%a0@
+ bclr #SERIAL_DTR,SERIAL_CNTRL-ZTWOBASE
+ get_bi_record BI_AMIGA_SERPER
+ movew %a0@,CUSTOMBASE+C_SERPER-ZTWOBASE
+| movew #61,CUSTOMBASE+C_SERPER-ZTWOBASE
+1:
+#endif
+#ifdef CONFIG_ATARI
+ is_not_atari(4f)
+ movel %pc@(L(iobase)),%a1
+#if defined(USE_PRINTER)
+ bclr #0,%a1@(LSTMFP_IERB)
+ bclr #0,%a1@(LSTMFP_DDR)
+ moveb #LPSG_CONTROL,%a1@(LPSG_SELECT)
+ moveb #0xff,%a1@(LPSG_WRITE)
+ moveb #LPSG_IO_B,%a1@(LPSG_SELECT)
+ clrb %a1@(LPSG_WRITE)
+ moveb #LPSG_IO_A,%a1@(LPSG_SELECT)
+ moveb %a1@(LPSG_READ),%d0
+ bset #5,%d0
+ moveb %d0,%a1@(LPSG_WRITE)
+#elif defined(USE_SCC)
+ lea %a1@(LSCC_CTRL),%a0
+ lea %pc@(L(scc_initable)),%a1
+2: moveb %a1@+,%d0
+ jmi 3f
+ moveb %d0,%a0@
+ moveb %a1@+,%a0@
+ jra 2b
+3: clrb %a0@
+#elif defined(USE_MFP)
+ bclr #1,%a1@(LMFP_TSR)
+ moveb #0x88,%a1@(LMFP_UCR)
+ andb #0x70,%a1@(LMFP_TDCDR)
+ moveb #2,%a1@(LMFP_TDDR)
+ orb #1,%a1@(LMFP_TDCDR)
+ bset #1,%a1@(LMFP_TSR)
+#endif
+ jra L(serial_init_done)
+4:
+#endif
+#ifdef CONFIG_MAC
+ is_not_mac(L(serial_init_not_mac))
+#ifdef MAC_SERIAL_DEBUG
+#if !defined(MAC_USE_SCC_A) && !defined(MAC_USE_SCC_B)
+#define MAC_USE_SCC_B
+#endif
+#define mac_scc_cha_b_ctrl_offset 0x0
+#define mac_scc_cha_a_ctrl_offset 0x2
+#define mac_scc_cha_b_data_offset 0x4
+#define mac_scc_cha_a_data_offset 0x6
+
+#ifdef MAC_USE_SCC_A
+ /* Initialize channel A */
+ movel %pc@(L(mac_sccbase)),%a0
+ lea %pc@(L(scc_initable_mac)),%a1
+5: moveb %a1@+,%d0
+ jmi 6f
+ moveb %d0,%a0@(mac_scc_cha_a_ctrl_offset)
+ moveb %a1@+,%a0@(mac_scc_cha_a_ctrl_offset)
+ jra 5b
+6:
+#endif /* MAC_USE_SCC_A */
+
+#ifdef MAC_USE_SCC_B
+ /* Initialize channel B */
+#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */
+ movel %pc@(L(mac_sccbase)),%a0
+#endif /* MAC_USE_SCC_A */
+ lea %pc@(L(scc_initable_mac)),%a1
+7: moveb %a1@+,%d0
+ jmi 8f
+ moveb %d0,%a0@(mac_scc_cha_b_ctrl_offset)
+ moveb %a1@+,%a0@(mac_scc_cha_b_ctrl_offset)
+ jra 7b
+8:
+#endif /* MAC_USE_SCC_B */
+#endif /* MAC_SERIAL_DEBUG */
+
+ jra L(serial_init_done)
+L(serial_init_not_mac):
+#endif /* CONFIG_MAC */
+
+#ifdef CONFIG_Q40
+ is_not_q40(2f)
+/* debug output goes into SRAM, so we don't do it unless requested
+ - check for '%LX$' signature in SRAM */
+ lea %pc@(q40_mem_cptr),%a1
+ move.l #0xff020010,%a1@ /* must be inited - also used by debug=mem */
+ move.l #0xff020000,%a1
+ cmp.b #'%',%a1@
+ bne 2f /*nodbg*/
+ addq.w #4,%a1
+ cmp.b #'L',%a1@
+ bne 2f /*nodbg*/
+ addq.w #4,%a1
+ cmp.b #'X',%a1@
+ bne 2f /*nodbg*/
+ addq.w #4,%a1
+ cmp.b #'$',%a1@
+ bne 2f /*nodbg*/
+ /* signature OK */
+ lea %pc@(L(q40_do_debug)),%a1
+ tas %a1@
+/*nodbg: q40_do_debug is 0 by default*/
+2:
+#endif
+
+#ifdef CONFIG_APOLLO
+/* We count on the PROM initializing SIO1 */
+#endif
+
+#ifdef CONFIG_HP300
+/* We count on the boot loader initialising the UART */
+#endif
+
+L(serial_init_done):
+func_return serial_init
+
+/*
+ * Output character on serial port.
+ */
+func_start serial_putc,%d0/%d1/%a0/%a1
+
+ movel ARG1,%d0
+ cmpib #'\n',%d0
+ jbne 1f
+
+ /* A little safe recursion is good for the soul */
+ serial_putc #'\r'
+1:
+
+#ifdef CONFIG_AMIGA
+ is_not_amiga(2f)
+ andw #0x00ff,%d0
+ oriw #0x0100,%d0
+ movel %pc@(L(custom)),%a0
+ movew %d0,%a0@(CUSTOMBASE+C_SERDAT)
+1: movew %a0@(CUSTOMBASE+C_SERDATR),%d0
+ andw #0x2000,%d0
+ jeq 1b
+ jra L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_MAC
+ is_not_mac(5f)
+
+#ifdef MAC_SERIAL_DEBUG
+
+#ifdef MAC_USE_SCC_A
+ movel %pc@(L(mac_sccbase)),%a1
+3: btst #2,%a1@(mac_scc_cha_a_ctrl_offset)
+ jeq 3b
+ moveb %d0,%a1@(mac_scc_cha_a_data_offset)
+#endif /* MAC_USE_SCC_A */
+
+#ifdef MAC_USE_SCC_B
+#ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */
+ movel %pc@(L(mac_sccbase)),%a1
+#endif /* MAC_USE_SCC_A */
+4: btst #2,%a1@(mac_scc_cha_b_ctrl_offset)
+ jeq 4b
+ moveb %d0,%a1@(mac_scc_cha_b_data_offset)
+#endif /* MAC_USE_SCC_B */
+
+#endif /* MAC_SERIAL_DEBUG */
+
+ jra L(serial_putc_done)
+5:
+#endif /* CONFIG_MAC */
+
+#ifdef CONFIG_ATARI
+ is_not_atari(4f)
+ movel %pc@(L(iobase)),%a1
+#if defined(USE_PRINTER)
+3: btst #0,%a1@(LSTMFP_GPIP)
+ jne 3b
+ moveb #LPSG_IO_B,%a1@(LPSG_SELECT)
+ moveb %d0,%a1@(LPSG_WRITE)
+ moveb #LPSG_IO_A,%a1@(LPSG_SELECT)
+ moveb %a1@(LPSG_READ),%d0
+ bclr #5,%d0
+ moveb %d0,%a1@(LPSG_WRITE)
+ nop
+ nop
+ bset #5,%d0
+ moveb %d0,%a1@(LPSG_WRITE)
+#elif defined(USE_SCC)
+3: btst #2,%a1@(LSCC_CTRL)
+ jeq 3b
+ moveb %d0,%a1@(LSCC_DATA)
+#elif defined(USE_MFP)
+3: btst #7,%a1@(LMFP_TSR)
+ jeq 3b
+ moveb %d0,%a1@(LMFP_UDR)
+#endif
+ jra L(serial_putc_done)
+4:
+#endif /* CONFIG_ATARI */
+
+#ifdef CONFIG_MVME147
+ is_not_mvme147(2f)
+1: btst #2,M147_SCC_CTRL_A
+ jeq 1b
+ moveb %d0,M147_SCC_DATA_A
+ jbra L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_MVME16x
+ is_not_mvme16x(2f)
+ /*
+ * If the loader gave us a board type then we can use that to
+ * select an appropriate output routine; otherwise we just use
+	 * the Bug code. If we have to use the Bug that means the Bug
+ * workspace has to be valid, which means the Bug has to use
+ * the SRAM, which is non-standard.
+ */
+ moveml %d0-%d7/%a2-%a6,%sp@-
+ movel vme_brdtype,%d1
+ jeq 1f | No tag - use the Bug
+ cmpi #VME_TYPE_MVME162,%d1
+ jeq 6f
+ cmpi #VME_TYPE_MVME172,%d1
+ jne 5f
+ /* 162/172; it's an SCC */
+6: btst #2,M162_SCC_CTRL_A
+ nop
+ nop
+ nop
+ jeq 6b
+ moveb #8,M162_SCC_CTRL_A
+ nop
+ nop
+ nop
+ moveb %d0,M162_SCC_CTRL_A
+ jra 3f
+5:
+ /* 166/167/177; it's a CD2401 */
+ moveb #0,M167_CYCAR
+ moveb M167_CYIER,%d2
+ moveb #0x02,M167_CYIER
+7:
+ btst #5,M167_PCSCCTICR
+ jeq 7b
+ moveb M167_PCTPIACKR,%d1
+ moveb M167_CYLICR,%d1
+ jeq 8f
+ moveb #0x08,M167_CYTEOIR
+ jra 7b
+8:
+ moveb %d0,M167_CYTDR
+ moveb #0,M167_CYTEOIR
+ moveb %d2,M167_CYIER
+ jra 3f
+1:
+ moveb %d0,%sp@-
+ trap #15
+ .word 0x0020 /* TRAP 0x020 */
+3:
+ moveml %sp@+,%d0-%d7/%a2-%a6
+ jbra L(serial_putc_done)
+2:
+#endif /* CONFIG_MVME16x */
+
+#ifdef CONFIG_BVME6000
+ is_not_bvme6000(2f)
+ /*
+ * The BVME6000 machine has a serial port ...
+ */
+1: btst #2,BVME_SCC_CTRL_A
+ jeq 1b
+ moveb %d0,BVME_SCC_DATA_A
+ jbra L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_SUN3X
+ is_not_sun3x(2f)
+ movel %d0,-(%sp)
+ movel 0xFEFE0018,%a1
+ jbsr (%a1)
+ addq #4,%sp
+ jbra L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_Q40
+ is_not_q40(2f)
+ tst.l %pc@(L(q40_do_debug)) /* only debug if requested */
+ beq 2f
+ lea %pc@(q40_mem_cptr),%a1
+ move.l %a1@,%a0
+ move.b %d0,%a0@
+ addq.l #4,%a0
+ move.l %a0,%a1@
+ jbra L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_APOLLO
+ is_not_apollo(2f)
+ movl %pc@(L(iobase)),%a1
+ moveb %d0,%a1@(LTHRB0)
+1: moveb %a1@(LSRB0),%d0
+ andb #0x4,%d0
+ beq 1b
+ jbra L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_HP300
+ is_not_hp300(3f)
+ movl %pc@(L(iobase)),%a1
+ addl %pc@(L(uartbase)),%a1
+ movel %pc@(L(uart_scode)),%d1 /* Check the scode */
+ jmi 3f /* Unset? Exit */
+ cmpi #256,%d1 /* APCI scode? */
+ jeq 2f
+1: moveb %a1@(DCALSR),%d1 /* Output to DCA */
+ andb #0x20,%d1
+ beq 1b
+ moveb %d0,%a1@(DCADATA)
+ jbra L(serial_putc_done)
+2: moveb %a1@(APCILSR),%d1 /* Output to APCI */
+ andb #0x20,%d1
+ beq 2b
+ moveb %d0,%a1@(APCIDATA)
+ jbra L(serial_putc_done)
+3:
+#endif
+
+L(serial_putc_done):
+func_return serial_putc
+
+/*
+ * Output a string.
+ */
+func_start puts,%d0/%a0
+
+ movel ARG1,%a0
+ jra 2f
+1:
+#ifdef CONSOLE
+ console_putc %d0
+#endif
+#ifdef SERIAL_DEBUG
+ serial_putc %d0
+#endif
+2: moveb %a0@+,%d0
+ jne 1b
+
+func_return puts
+
+/*
+ * Output number in hex notation.
+ */
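+/*
+ * (Eight passes of "roll #4" rotate the next nibble of the argument
+ * into the low four bits, so the hex digits are emitted most
+ * significant first.)
+ */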
+
+func_start putn,%d0-%d2
+
+ putc ' '
+
+ movel ARG1,%d0
+ moveq #7,%d1
+1: roll #4,%d0
+ move %d0,%d2
+ andb #0x0f,%d2
+ addb #'0',%d2
+ cmpb #'9',%d2
+ jls 2f
+ addb #'A'-('9'+1),%d2
+2:
+#ifdef CONSOLE
+ console_putc %d2
+#endif
+#ifdef SERIAL_DEBUG
+ serial_putc %d2
+#endif
+ dbra %d1,1b
+
+func_return putn
+
+#ifdef CONFIG_MAC
+/*
+ * mac_serial_print
+ *
+ * This routine takes its parameters on the stack. It then
+ * turns around and calls the internal routine. This routine
+ * is used until the Linux console driver initializes itself.
+ *
+ * The calling parameters are:
+ * void mac_serial_print(const char *str);
+ *
+ * This routine does NOT understand variable arguments, only
+ * simple strings!
+ */
+ENTRY(mac_serial_print)
+ moveml %d0/%a0,%sp@-
+#if 1
+ move %sr,%sp@-
+ ori #0x0700,%sr
+#endif
+ movel %sp@(10),%a0 /* fetch parameter */
+ jra 2f
+1: serial_putc %d0
+2: moveb %a0@+,%d0
+ jne 1b
+#if 1
+ move %sp@+,%sr
+#endif
+ moveml %sp@+,%d0/%a0
+ rts
+#endif /* CONFIG_MAC */
+
+#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
+func_start set_leds,%d0/%a0
+ movel ARG1,%d0
+#ifdef CONFIG_HP300
+ is_not_hp300(1f)
+ movel %pc@(L(iobase)),%a0
+ moveb %d0,%a0@(0x1ffff)
+ jra 2f
+#endif
+1:
+#ifdef CONFIG_APOLLO
+ movel %pc@(L(iobase)),%a0
+ lsll #8,%d0
+ eorw #0xff00,%d0
+ moveb %d0,%a0@(LCPUCTRL)
+#endif
+2:
+func_return set_leds
+#endif
+
+#ifdef CONSOLE
+/*
+ * These offsets index into L(console_globals); keep them in step with
+ * the data layout defined in the __INITDATA section below.
+ */
+#define Lconsole_struct_cur_column 0
+#define Lconsole_struct_cur_row 4
+#define Lconsole_struct_num_columns 8
+#define Lconsole_struct_num_rows 12
+#define Lconsole_struct_left_edge 16
+#define Lconsole_struct_penguin_putc 20
+
+func_start console_init,%a0-%a4/%d0-%d7
+ /*
+ * Some of the register usage that follows
+ * a0 = pointer to boot_info
+ * a1 = pointer to screen
+ * a2 = pointer to Lconsole_globals
+ * d3 = pixel width of screen
+ * d4 = pixel height of screen
+ * (d3,d4) ~= (x,y) of a point just below
+ * and to the right of the screen
+ * NOT on the screen!
+ * d5 = number of bytes per scan line
+ * d6 = number of bytes on the entire screen
+ */
+
+ lea %pc@(L(console_globals)),%a2
+ movel %pc@(L(mac_videobase)),%a1
+ movel %pc@(L(mac_rowbytes)),%d5
+ movel %pc@(L(mac_dimensions)),%d3 /* -> low byte */
+ movel %d3,%d4
+ swap %d4 /* -> high byte */
+ andl #0xffff,%d3 /* d3 = screen width in pixels */
+ andl #0xffff,%d4 /* d4 = screen height in pixels */
+
+ movel %d5,%d6
+| subl #20,%d6
+ mulul %d4,%d6 /* scan line bytes x num scan lines */
+ divul #8,%d6 /* we'll clear 8 bytes at a time */
+ moveq #-1,%d0 /* Mac_black */
+ subq #1,%d6
+
+L(console_clear_loop):
+ movel %d0,%a1@+
+ movel %d0,%a1@+
+ dbra %d6,L(console_clear_loop)
+
+ /* Calculate font size */
+
+#if defined(FONT_8x8) && defined(CONFIG_FONT_8x8)
+ lea %pc@(font_vga_8x8),%a0
+#elif defined(FONT_8x16) && defined(CONFIG_FONT_8x16)
+ lea %pc@(font_vga_8x16),%a0
+#elif defined(FONT_6x11) && defined(CONFIG_FONT_6x11)
+ lea %pc@(font_vga_6x11),%a0
+#elif defined(CONFIG_FONT_8x8) /* default */
+ lea %pc@(font_vga_8x8),%a0
+#else /* no compiled-in font */
+ lea 0,%a0
+#endif
+
+ /*
+ * At this point we make a shift in register usage
+ * a1 = address of console_font pointer
+ */
+ lea %pc@(L(console_font)),%a1
+ movel %a0,%a1@ /* store pointer to struct fbcon_font_desc in console_font */
+ tstl %a0
+ jeq 1f
+ lea %pc@(L(console_font_data)),%a4
+ movel %a0@(FONT_DESC_DATA),%d0
+ subl #L(console_font),%a1
+ addl %a1,%d0
+ movel %d0,%a4@
+
+ /*
+	 * Calculate global maximums.
+	 * Note - we can use an 8 x 16, 8 x 8,
+	 * or 6 x 11 character font.
+ */
+ /* ASSERT: a0 = contents of Lconsole_font */
+ movel %d3,%d0 /* screen width in pixels */
+ divul %a0@(FONT_DESC_WIDTH),%d0 /* d0 = max num chars per row */
+
+ movel %d4,%d1 /* screen height in pixels */
+ divul %a0@(FONT_DESC_HEIGHT),%d1 /* d1 = max num rows */
+
+ movel %d0,%a2@(Lconsole_struct_num_columns)
+ movel %d1,%a2@(Lconsole_struct_num_rows)
+
+ /*
+ * Clear the current row and column
+ */
+ clrl %a2@(Lconsole_struct_cur_column)
+ clrl %a2@(Lconsole_struct_cur_row)
+ clrl %a2@(Lconsole_struct_left_edge)
+
+ /*
+ * Initialization is complete
+ */
+1:
+func_return console_init
+
+func_start console_put_stats,%a0/%d7
+ /*
+ * Some of the register usage that follows
+ * a0 = pointer to boot_info
+ * d7 = value of boot_info fields
+ */
+ puts "\nMacLinux\n\n"
+
+#ifdef SERIAL_DEBUG
+ puts " vidaddr:"
+ putn %pc@(L(mac_videobase)) /* video addr. */
+
+ puts "\n _stext:"
+ lea %pc@(_stext),%a0
+ putn %a0
+
+ puts "\nbootinfo:"
+ lea %pc@(_end),%a0
+ putn %a0
+
+ puts "\ncpuid:"
+ putn %pc@(L(cputype))
+ putc '\n'
+
+#ifdef MAC_SERIAL_DEBUG
+ putn %pc@(L(mac_sccbase))
+ putc '\n'
+#endif
+# if defined(MMU_PRINT)
+ jbsr mmu_print_machine_cpu_types
+# endif /* MMU_PRINT */
+#endif /* SERIAL_DEBUG */
+
+func_return console_put_stats
+
+#ifdef CONSOLE_PENGUIN
+func_start console_put_penguin,%a0-%a1/%d0-%d7
+ /*
+ * Get 'that_penguin' onto the screen in the upper right corner
+ * penguin is 64 x 74 pixels, align against right edge of screen
+ */
+ lea %pc@(L(mac_dimensions)),%a0
+ movel %a0@,%d0
+ andil #0xffff,%d0
+ subil #64,%d0 /* snug up against the right edge */
+ clrl %d1 /* start at the top */
+ movel #73,%d7
+ lea %pc@(L(that_penguin)),%a1
+L(console_penguin_row):
+ movel #31,%d6
+L(console_penguin_pixel_pair):
+ moveb %a1@,%d2
+ lsrb #4,%d2
+ console_plot_pixel %d0,%d1,%d2
+ addq #1,%d0
+ moveb %a1@+,%d2
+ console_plot_pixel %d0,%d1,%d2
+ addq #1,%d0
+ dbra %d6,L(console_penguin_pixel_pair)
+
+ subil #64,%d0
+ addq #1,%d1
+ dbra %d7,L(console_penguin_row)
+
+func_return console_put_penguin
+
+/* include penguin bitmap */
+L(that_penguin):
+#include "../mac/mac_penguin.S"
+#endif
+
+ /*
+ * Calculate source and destination addresses
+ * output a1 = dest
+ * a2 = source
+ */
+
+func_start console_scroll,%a0-%a4/%d0-%d7
+ lea %pc@(L(mac_videobase)),%a0
+ movel %a0@,%a1
+ movel %a1,%a2
+ lea %pc@(L(mac_rowbytes)),%a0
+ movel %a0@,%d5
+ movel %pc@(L(console_font)),%a0
+ tstl %a0
+ jeq 1f
+ mulul %a0@(FONT_DESC_HEIGHT),%d5 /* account for # scan lines per character */
+ addal %d5,%a2
+
+ /*
+ * Get dimensions
+ */
+ lea %pc@(L(mac_dimensions)),%a0
+ movel %a0@,%d3
+ movel %d3,%d4
+ swap %d4
+ andl #0xffff,%d3 /* d3 = screen width in pixels */
+ andl #0xffff,%d4 /* d4 = screen height in pixels */
+
+ /*
+ * Calculate number of bytes to move
+ */
+ lea %pc@(L(mac_rowbytes)),%a0
+ movel %a0@,%d6
+ movel %pc@(L(console_font)),%a0
+ subl %a0@(FONT_DESC_HEIGHT),%d4 /* we're not scrolling the top row! */
+ mulul %d4,%d6 /* scan line bytes x num scan lines */
+ divul #32,%d6 /* we'll move 8 longs at a time */
+ subq #1,%d6
+
+L(console_scroll_loop):
+ movel %a2@+,%a1@+
+ movel %a2@+,%a1@+
+ movel %a2@+,%a1@+
+ movel %a2@+,%a1@+
+ movel %a2@+,%a1@+
+ movel %a2@+,%a1@+
+ movel %a2@+,%a1@+
+ movel %a2@+,%a1@+
+ dbra %d6,L(console_scroll_loop)
+
+ lea %pc@(L(mac_rowbytes)),%a0
+ movel %a0@,%d6
+ movel %pc@(L(console_font)),%a0
+ mulul %a0@(FONT_DESC_HEIGHT),%d6 /* scan line bytes x font height */
+	divul	#32,%d6			/* we'll move 8 longs at a time */
+ subq #1,%d6
+
+ moveq #-1,%d0
+L(console_scroll_clear_loop):
+ movel %d0,%a1@+
+ movel %d0,%a1@+
+ movel %d0,%a1@+
+ movel %d0,%a1@+
+ movel %d0,%a1@+
+ movel %d0,%a1@+
+ movel %d0,%a1@+
+ movel %d0,%a1@+
+ dbra %d6,L(console_scroll_clear_loop)
+
+1:
+func_return console_scroll
+
+
+func_start console_putc,%a0/%a1/%d0-%d7
+
+ is_not_mac(L(console_exit))
+ tstl %pc@(L(console_font))
+ jeq L(console_exit)
+
+ /* Output character in d7 on console.
+ */
+ movel ARG1,%d7
+ cmpib #'\n',%d7
+ jbne 1f
+
+ /* A little safe recursion is good for the soul */
+ console_putc #'\r'
+1:
+ lea %pc@(L(console_globals)),%a0
+
+ cmpib #10,%d7
+ jne L(console_not_lf)
+ movel %a0@(Lconsole_struct_cur_row),%d0
+ addil #1,%d0
+ movel %d0,%a0@(Lconsole_struct_cur_row)
+ movel %a0@(Lconsole_struct_num_rows),%d1
+ cmpl %d1,%d0
+ jcs 1f
+ subil #1,%d0
+ movel %d0,%a0@(Lconsole_struct_cur_row)
+ console_scroll
+1:
+ jra L(console_exit)
+
+L(console_not_lf):
+ cmpib #13,%d7
+ jne L(console_not_cr)
+ clrl %a0@(Lconsole_struct_cur_column)
+ jra L(console_exit)
+
+L(console_not_cr):
+ cmpib #1,%d7
+ jne L(console_not_home)
+ clrl %a0@(Lconsole_struct_cur_row)
+ clrl %a0@(Lconsole_struct_cur_column)
+ jra L(console_exit)
+
+/*
+ * At this point we know that the %d7 character is going to be
+ * rendered on the screen. Register usage is -
+ * a0 = pointer to console globals
+ * a1 = font data
+ * d0 = cursor column
+ * d1 = cursor row to draw the character
+ * d7 = character number
+ */
+L(console_not_home):
+ movel %a0@(Lconsole_struct_cur_column),%d0
+ addql #1,%a0@(Lconsole_struct_cur_column)
+ movel %a0@(Lconsole_struct_num_columns),%d1
+ cmpl %d1,%d0
+ jcs 1f
+ console_putc #'\n' /* recursion is OK! */
+1:
+ movel %a0@(Lconsole_struct_cur_row),%d1
+
+ /*
+ * At this point we make a shift in register usage
+ * a0 = address of pointer to font data (fbcon_font_desc)
+ */
+ movel %pc@(L(console_font)),%a0
+ movel %pc@(L(console_font_data)),%a1 /* Load fbcon_font_desc.data into a1 */
+ andl #0x000000ff,%d7
+ /* ASSERT: a0 = contents of Lconsole_font */
+ mulul %a0@(FONT_DESC_HEIGHT),%d7 /* d7 = index into font data */
+ addl %d7,%a1 /* a1 = points to char image */
+
+ /*
+ * At this point we make a shift in register usage
+ * d0 = pixel coordinate, x
+ * d1 = pixel coordinate, y
+ * d2 = (bit 0) 1/0 for white/black (!) pixel on screen
+ * d3 = font scan line data (8 pixels)
+ * d6 = count down for the font's pixel width (8)
+ * d7 = count down for the font's pixel count in height
+ */
+ /* ASSERT: a0 = contents of Lconsole_font */
+ mulul %a0@(FONT_DESC_WIDTH),%d0
+ mulul %a0@(FONT_DESC_HEIGHT),%d1
+ movel %a0@(FONT_DESC_HEIGHT),%d7 /* Load fbcon_font_desc.height into d7 */
+ subq #1,%d7
+L(console_read_char_scanline):
+ moveb %a1@+,%d3
+
+ /* ASSERT: a0 = contents of Lconsole_font */
+ movel %a0@(FONT_DESC_WIDTH),%d6 /* Load fbcon_font_desc.width into d6 */
+ subql #1,%d6
+
+L(console_do_font_scanline):
+ lslb #1,%d3
+ scsb %d2 /* convert 1 bit into a byte */
+ console_plot_pixel %d0,%d1,%d2
+ addq #1,%d0
+ dbra %d6,L(console_do_font_scanline)
+
+ /* ASSERT: a0 = contents of Lconsole_font */
+ subl %a0@(FONT_DESC_WIDTH),%d0
+ addq #1,%d1
+ dbra %d7,L(console_read_char_scanline)
+
+L(console_exit):
+func_return console_putc
+
+ /*
+ * Input:
+ * d0 = x coordinate
+ * d1 = y coordinate
+ * d2 = (bit 0) 1/0 for white/black (!)
+ * All registers are preserved
+ */
+func_start console_plot_pixel,%a0-%a1/%d0-%d4
+
+ movel %pc@(L(mac_videobase)),%a1
+ movel %pc@(L(mac_videodepth)),%d3
+ movel ARG1,%d0
+ movel ARG2,%d1
+ mulul %pc@(L(mac_rowbytes)),%d1
+ movel ARG3,%d2
+
+ /*
+ * Register usage:
+ * d0 = x coord becomes byte offset into frame buffer
+ * d1 = y coord
+ * d2 = black or white (0/1)
+ * d3 = video depth
+ * d4 = temp of x (d0) for many bit depths
+ */
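+	/*
+	 * (For the 1-bit case below, "divul #8" turns the x coordinate
+	 * into a byte offset while the saved low three bits select the
+	 * bit within that byte; "eorb #7" flips the index because
+	 * bit 7 is the leftmost pixel on screen.)
+	 */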
+L(test_1bit):
+ cmpb #1,%d3
+ jbne L(test_2bit)
+ movel %d0,%d4 /* we need the low order 3 bits! */
+ divul #8,%d0
+ addal %d0,%a1
+ addal %d1,%a1
+ andb #7,%d4
+ eorb #7,%d4 /* reverse the x-coordinate w/ screen-bit # */
+ andb #1,%d2
+ jbne L(white_1)
+ bsetb %d4,%a1@
+ jbra L(console_plot_pixel_exit)
+L(white_1):
+ bclrb %d4,%a1@
+ jbra L(console_plot_pixel_exit)
+
+L(test_2bit):
+ cmpb #2,%d3
+ jbne L(test_4bit)
+ movel %d0,%d4 /* we need the low order 2 bits! */
+ divul #4,%d0
+ addal %d0,%a1
+ addal %d1,%a1
+ andb #3,%d4
+ eorb #3,%d4 /* reverse the x-coordinate w/ screen-bit # */
+ lsll #1,%d4 /* ! */
+ andb #1,%d2
+ jbne L(white_2)
+ bsetb %d4,%a1@
+ addq #1,%d4
+ bsetb %d4,%a1@
+ jbra L(console_plot_pixel_exit)
+L(white_2):
+ bclrb %d4,%a1@
+ addq #1,%d4
+ bclrb %d4,%a1@
+ jbra L(console_plot_pixel_exit)
+
+L(test_4bit):
+ cmpb #4,%d3
+ jbne L(test_8bit)
+ movel %d0,%d4 /* we need the low order bit! */
+ divul #2,%d0
+ addal %d0,%a1
+ addal %d1,%a1
+ andb #1,%d4
+ eorb #1,%d4
+ lsll #2,%d4 /* ! */
+ andb #1,%d2
+ jbne L(white_4)
+ bsetb %d4,%a1@
+ addq #1,%d4
+ bsetb %d4,%a1@
+ addq #1,%d4
+ bsetb %d4,%a1@
+ addq #1,%d4
+ bsetb %d4,%a1@
+ jbra L(console_plot_pixel_exit)
+L(white_4):
+ bclrb %d4,%a1@
+ addq #1,%d4
+ bclrb %d4,%a1@
+ addq #1,%d4
+ bclrb %d4,%a1@
+ addq #1,%d4
+ bclrb %d4,%a1@
+ jbra L(console_plot_pixel_exit)
+
+L(test_8bit):
+ cmpb #8,%d3
+ jbne L(test_16bit)
+ addal %d0,%a1
+ addal %d1,%a1
+ andb #1,%d2
+ jbne L(white_8)
+ moveb #0xff,%a1@
+ jbra L(console_plot_pixel_exit)
+L(white_8):
+ clrb %a1@
+ jbra L(console_plot_pixel_exit)
+
+L(test_16bit):
+ cmpb #16,%d3
+ jbne L(console_plot_pixel_exit)
+ addal %d0,%a1
+ addal %d0,%a1
+ addal %d1,%a1
+ andb #1,%d2
+ jbne L(white_16)
+ clrw %a1@
+ jbra L(console_plot_pixel_exit)
+L(white_16):
+ movew #0x0fff,%a1@
+ jbra L(console_plot_pixel_exit)
+
+L(console_plot_pixel_exit):
+func_return console_plot_pixel
+#endif /* CONSOLE */
+
+#if 0
+/*
+ * This is some old code lying around. I don't believe
+ * it's used or important anymore. My guess is it contributed
+ * to getting to this point, but it's done for now.
+ * It was still in the 2.1.77 head.S, so it's still here.
+ * (And still not used!)
+ */
+L(showtest):
+ moveml %a0/%d7,%sp@-
+ puts "A="
+ putn %a1
+
+ .long 0xf0119f15 | ptestr #5,%a1@,#7,%a0
+
+ puts "DA="
+ putn %a0
+
+ puts "D="
+ putn %a0@
+
+ puts "S="
+ lea %pc@(L(mmu)),%a0
+ .long 0xf0106200 | pmove %psr,%a0@
+ clrl %d7
+ movew %a0@,%d7
+ putn %d7
+
+ putc '\n'
+ moveml %sp@+,%a0/%d7
+ rts
+#endif /* 0 */
+
+__INITDATA
+ .align 4
+
+#if defined(CONFIG_ATARI) || defined(CONFIG_AMIGA) || \
+ defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
+L(custom):
+L(iobase):
+ .long 0
+#endif
+
+#if defined(CONSOLE)
+L(console_globals):
+ .long 0 /* cursor column */
+ .long 0 /* cursor row */
+ .long 0 /* max num columns */
+ .long 0 /* max num rows */
+ .long 0 /* left edge */
+ .long 0 /* mac putc */
+L(console_font):
+ .long 0 /* pointer to console font (struct font_desc) */
+L(console_font_data):
+ .long 0 /* pointer to console font data */
+#endif /* CONSOLE */
+
+#if defined(MMU_PRINT)
+L(mmu_print_data):
+ .long 0 /* valid flag */
+ .long 0 /* start logical */
+ .long 0 /* next logical */
+ .long 0 /* start physical */
+ .long 0 /* next physical */
+#endif /* MMU_PRINT */
+
+L(cputype):
+ .long 0
+L(mmu_cached_pointer_tables):
+ .long 0
+L(mmu_num_pointer_tables):
+ .long 0
+L(phys_kernel_start):
+ .long 0
+L(kernel_end):
+ .long 0
+L(memory_start):
+ .long 0
+L(kernel_pgdir_ptr):
+ .long 0
+L(temp_mmap_mem):
+ .long 0
+
+#if defined (CONFIG_MVME147)
+M147_SCC_CTRL_A = 0xfffe3002
+M147_SCC_DATA_A = 0xfffe3003
+#endif
+
+#if defined (CONFIG_MVME16x)
+M162_SCC_CTRL_A = 0xfff45005
+M167_CYCAR = 0xfff450ee
+M167_CYIER = 0xfff45011
+M167_CYLICR = 0xfff45026
+M167_CYTEOIR = 0xfff45085
+M167_CYTDR = 0xfff450f8
+M167_PCSCCTICR = 0xfff4201e
+M167_PCTPIACKR = 0xfff42025
+#endif
+
+#if defined (CONFIG_BVME6000)
+BVME_SCC_CTRL_A = 0xffb0000b
+BVME_SCC_DATA_A = 0xffb0000f
+#endif
+
+#if defined(CONFIG_MAC)
+L(mac_booter_data):
+ .long 0
+L(mac_videobase):
+ .long 0
+L(mac_videodepth):
+ .long 0
+L(mac_dimensions):
+ .long 0
+L(mac_rowbytes):
+ .long 0
+#ifdef MAC_SERIAL_DEBUG
+L(mac_sccbase):
+ .long 0
+#endif /* MAC_SERIAL_DEBUG */
+#endif
+
+#if defined (CONFIG_APOLLO)
+LSRB0 = 0x10412
+LTHRB0 = 0x10416
+LCPUCTRL = 0x10100
+#endif
+
+#if defined(CONFIG_HP300)
+DCADATA = 0x11
+DCALSR = 0x1b
+APCIDATA = 0x00
+APCILSR = 0x14
+L(uartbase):
+ .long 0
+L(uart_scode):
+ .long -1
+#endif
+
+__FINIT
+ .data
+ .align 4
+
+availmem:
+ .long 0
+m68k_pgtable_cachemode:
+ .long 0
+m68k_supervisor_cachemode:
+ .long 0
+#if defined(CONFIG_MVME16x)
+mvme_bdid:
+ .long 0,0,0,0,0,0,0,0
+#endif
+#if defined(CONFIG_Q40)
+q40_mem_cptr:
+ .long 0
+L(q40_do_debug):
+ .long 0
+#endif
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
new file mode 100644
index 00000000000..514d323ad53
--- /dev/null
+++ b/arch/m68k/kernel/ints.c
@@ -0,0 +1,281 @@
+/*
+ * linux/arch/m68k/kernel/ints.c -- Linux/m68k general interrupt handling code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * 07/03/96: Timer initialization, and thus mach_sched_init(),
+ * removed from request_irq() and moved to init_time().
+ * We should therefore consider renaming our add_isr() and
+ * remove_isr() to request_irq() and free_irq()
+ * respectively, so they are compliant with the other
+ * architectures. /Jes
+ * 11/07/96: Changed all add_/remove_isr() to request_/free_irq() calls.
+ *          Removed irq list support; if a machine needs an irq server
+ *          it must implement it itself (as is already done). Otherwise
+ *          only the default handlers in mach_default_handler are used.
+ * request_irq got some flags different from other architectures:
+ * - IRQ_FLG_REPLACE : Replace an existing handler (the default one
+ * can be replaced without this flag)
+ * - IRQ_FLG_LOCK : handler can't be replaced
+ *          There are other machine-dependent flags; see the respective
+ *          machine code. If you want to replace a default handler you
+ *          should know what you're doing, since it might handle several
+ *          other irq sources which must still be served. /Roman Zippel
+ */
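+/*
+ * Illustrative use of the flags above (hypothetical driver and irq
+ * number, sketch only):
+ *
+ *	if (request_irq(IRQ1, my_handler, IRQ_FLG_LOCK, "mydriver", dev))
+ *		printk("mydriver: IRQ already taken\n");
+ *
+ * With IRQ_FLG_LOCK the installed handler cannot later be replaced;
+ * without it a default handler may be replaced as described above.
+ */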
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <asm/setup.h>
+#include <asm/system.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/page.h>
+#include <asm/machdep.h>
+
+#ifdef CONFIG_Q40
+#include <asm/q40ints.h>
+#endif
+
+/* table for system interrupt handlers */
+static irq_handler_t irq_list[SYS_IRQS];
+
+static const char *default_names[SYS_IRQS] = {
+ [0] = "spurious int",
+ [1] = "int1 handler",
+ [2] = "int2 handler",
+ [3] = "int3 handler",
+ [4] = "int4 handler",
+ [5] = "int5 handler",
+ [6] = "int6 handler",
+ [7] = "int7 handler"
+};
+
+/* The number of spurious interrupts */
+volatile unsigned int num_spurious;
+
+#define NUM_IRQ_NODES 100
+static irq_node_t nodes[NUM_IRQ_NODES];
+
+static void dummy_enable_irq(unsigned int irq);
+static void dummy_disable_irq(unsigned int irq);
+static int dummy_request_irq(unsigned int irq,
+ irqreturn_t (*handler) (int, void *, struct pt_regs *),
+ unsigned long flags, const char *devname, void *dev_id);
+static void dummy_free_irq(unsigned int irq, void *dev_id);
+
+void (*enable_irq) (unsigned int) = dummy_enable_irq;
+void (*disable_irq) (unsigned int) = dummy_disable_irq;
+
+int (*mach_request_irq) (unsigned int, irqreturn_t (*)(int, void *, struct pt_regs *),
+ unsigned long, const char *, void *) = dummy_request_irq;
+void (*mach_free_irq) (unsigned int, void *) = dummy_free_irq;
+
+void init_irq_proc(void);
+
+/*
+ * void init_IRQ(void)
+ *
+ * Parameters: None
+ *
+ * Returns: Nothing
+ *
+ * This function should be called during kernel startup to initialize
+ * the IRQ handling routines.
+ */
+
+void __init init_IRQ(void)
+{
+ int i;
+
+ for (i = 0; i < SYS_IRQS; i++) {
+ if (mach_default_handler)
+ irq_list[i].handler = (*mach_default_handler)[i];
+ irq_list[i].flags = 0;
+ irq_list[i].dev_id = NULL;
+ irq_list[i].devname = default_names[i];
+ }
+
+ for (i = 0; i < NUM_IRQ_NODES; i++)
+ nodes[i].handler = NULL;
+
+ mach_init_IRQ ();
+}
+
+irq_node_t *new_irq_node(void)
+{
+ irq_node_t *node;
+ short i;
+
+ for (node = nodes, i = NUM_IRQ_NODES-1; i >= 0; node++, i--)
+ if (!node->handler)
+ return node;
+
+ printk ("new_irq_node: out of nodes\n");
+ return NULL;
+}
+
+/*
+ * We will keep these functions until I have convinced Linus to move
+ * the declaration of them from include/linux/sched.h to
+ * include/asm/irq.h.
+ */
+int request_irq(unsigned int irq,
+ irqreturn_t (*handler) (int, void *, struct pt_regs *),
+ unsigned long flags, const char *devname, void *dev_id)
+{
+ return mach_request_irq(irq, handler, flags, devname, dev_id);
+}
+
+EXPORT_SYMBOL(request_irq);
+
+void free_irq(unsigned int irq, void *dev_id)
+{
+ mach_free_irq(irq, dev_id);
+}
+
+EXPORT_SYMBOL(free_irq);
+
+int cpu_request_irq(unsigned int irq,
+ irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ unsigned long flags, const char *devname, void *dev_id)
+{
+ if (irq < IRQ1 || irq > IRQ7) {
+ printk("%s: Incorrect IRQ %d from %s\n",
+ __FUNCTION__, irq, devname);
+ return -ENXIO;
+ }
+
+#if 0
+ if (!(irq_list[irq].flags & IRQ_FLG_STD)) {
+ if (irq_list[irq].flags & IRQ_FLG_LOCK) {
+ printk("%s: IRQ %d from %s is not replaceable\n",
+ __FUNCTION__, irq, irq_list[irq].devname);
+ return -EBUSY;
+ }
+ if (!(flags & IRQ_FLG_REPLACE)) {
+ printk("%s: %s can't replace IRQ %d from %s\n",
+ __FUNCTION__, devname, irq, irq_list[irq].devname);
+ return -EBUSY;
+ }
+ }
+#endif
+
+ irq_list[irq].handler = handler;
+ irq_list[irq].flags = flags;
+ irq_list[irq].dev_id = dev_id;
+ irq_list[irq].devname = devname;
+ return 0;
+}
+
+void cpu_free_irq(unsigned int irq, void *dev_id)
+{
+ if (irq < IRQ1 || irq > IRQ7) {
+ printk("%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
+ return;
+ }
+
+ if (irq_list[irq].dev_id != dev_id)
+ printk("%s: Removing probably wrong IRQ %d from %s\n",
+ __FUNCTION__, irq, irq_list[irq].devname);
+
+ irq_list[irq].handler = (*mach_default_handler)[irq];
+ irq_list[irq].flags = 0;
+ irq_list[irq].dev_id = NULL;
+ irq_list[irq].devname = default_names[irq];
+}
+
+/*
+ * Do we need these probe functions on the m68k?
+ *
+ * ... may be useful with ISA devices
+ */
+unsigned long probe_irq_on (void)
+{
+#ifdef CONFIG_Q40
+ if (MACH_IS_Q40)
+ return q40_probe_irq_on();
+#endif
+ return 0;
+}
+
+EXPORT_SYMBOL(probe_irq_on);
+
+int probe_irq_off (unsigned long irqs)
+{
+#ifdef CONFIG_Q40
+ if (MACH_IS_Q40)
+ return q40_probe_irq_off(irqs);
+#endif
+ return 0;
+}
+
+EXPORT_SYMBOL(probe_irq_off);
+
+static void dummy_enable_irq(unsigned int irq)
+{
+ printk("calling uninitialized enable_irq()\n");
+}
+
+static void dummy_disable_irq(unsigned int irq)
+{
+ printk("calling uninitialized disable_irq()\n");
+}
+
+static int dummy_request_irq(unsigned int irq,
+ irqreturn_t (*handler) (int, void *, struct pt_regs *),
+ unsigned long flags, const char *devname, void *dev_id)
+{
+ printk("calling uninitialized request_irq()\n");
+ return 0;
+}
+
+static void dummy_free_irq(unsigned int irq, void *dev_id)
+{
+	printk("calling uninitialized free_irq()\n");
+}
+
+asmlinkage void process_int(unsigned long vec, struct pt_regs *fp)
+{
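+	/*
+	 * The autovector exceptions VEC_INT1..VEC_INT7 directly follow
+	 * VEC_SPUR, so subtracting VEC_SPUR maps them onto irq_list
+	 * slots 1-7.
+	 */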
+ if (vec >= VEC_INT1 && vec <= VEC_INT7 && !MACH_IS_BVME6000) {
+ vec -= VEC_SPUR;
+ kstat_cpu(0).irqs[vec]++;
+ irq_list[vec].handler(vec, irq_list[vec].dev_id, fp);
+ } else {
+ if (mach_process_int)
+ mach_process_int(vec, fp);
+ else
+ panic("Can't process interrupt vector %ld\n", vec);
+ return;
+ }
+}
+
+int show_interrupts(struct seq_file *p, void *v)
+{
+ int i = *(loff_t *) v;
+
+ /* autovector interrupts */
+ if (i < SYS_IRQS) {
+ if (mach_default_handler) {
+ seq_printf(p, "auto %2d: %10u ", i,
+ i ? kstat_cpu(0).irqs[i] : num_spurious);
+ seq_puts(p, " ");
+ seq_printf(p, "%s\n", irq_list[i].devname);
+ }
+ } else if (i == SYS_IRQS)
+ mach_get_irq_list(p, v);
+ return 0;
+}
+
+void init_irq_proc(void)
+{
+ /* Insert /proc/irq driver here */
+}
+
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
new file mode 100644
index 00000000000..fe837e31afb
--- /dev/null
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -0,0 +1,88 @@
+#include <linux/module.h>
+#include <linux/linkage.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/in6.h>
+#include <linux/interrupt.h>
+#include <linux/config.h>
+
+#include <asm/setup.h>
+#include <asm/machdep.h>
+#include <asm/pgalloc.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/semaphore.h>
+#include <asm/checksum.h>
+
+asmlinkage long long __ashldi3 (long long, int);
+asmlinkage long long __ashrdi3 (long long, int);
+asmlinkage long long __lshrdi3 (long long, int);
+asmlinkage long long __muldi3 (long long, long long);
+extern char m68k_debug_device[];
+
+extern void dump_thread(struct pt_regs *, struct user *);
+
+/* platform dependent support */
+
+EXPORT_SYMBOL(m68k_machtype);
+EXPORT_SYMBOL(m68k_cputype);
+EXPORT_SYMBOL(m68k_is040or060);
+EXPORT_SYMBOL(m68k_realnum_memory);
+EXPORT_SYMBOL(m68k_memory);
+#ifndef CONFIG_SUN3
+EXPORT_SYMBOL(cache_push);
+EXPORT_SYMBOL(cache_clear);
+#ifndef CONFIG_SINGLE_MEMORY_CHUNK
+EXPORT_SYMBOL(mm_vtop);
+EXPORT_SYMBOL(mm_ptov);
+EXPORT_SYMBOL(mm_end_of_chunk);
+#else
+EXPORT_SYMBOL(m68k_memoffset);
+#endif /* !CONFIG_SINGLE_MEMORY_CHUNK */
+EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(iounmap);
+EXPORT_SYMBOL(kernel_set_cachemode);
+#endif /* !CONFIG_SUN3 */
+EXPORT_SYMBOL(m68k_debug_device);
+EXPORT_SYMBOL(mach_hwclk);
+EXPORT_SYMBOL(mach_get_ss);
+EXPORT_SYMBOL(mach_get_rtc_pll);
+EXPORT_SYMBOL(mach_set_rtc_pll);
+#ifdef CONFIG_INPUT_M68K_BEEP_MODULE
+EXPORT_SYMBOL(mach_beep);
+#endif
+EXPORT_SYMBOL(dump_fpu);
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(kernel_thread);
+#ifdef CONFIG_VME
+EXPORT_SYMBOL(vme_brdtype);
+#endif
+
+/* The following are special because they're not called
+ explicitly (the C compiler generates them). Fortunately,
+ their interface isn't gonna change any time soon now, so
+ it's OK to leave it out of version control. */
+EXPORT_SYMBOL(__ashldi3);
+EXPORT_SYMBOL(__ashrdi3);
+EXPORT_SYMBOL(__lshrdi3);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(__muldi3);
+
+EXPORT_SYMBOL(__down_failed);
+EXPORT_SYMBOL(__down_failed_interruptible);
+EXPORT_SYMBOL(__down_failed_trylock);
+EXPORT_SYMBOL(__up_wakeup);
+
+EXPORT_SYMBOL(get_wchan);
diff --git a/arch/m68k/kernel/module.c b/arch/m68k/kernel/module.c
new file mode 100644
index 00000000000..3b1a2ff61dd
--- /dev/null
+++ b/arch/m68k/kernel/module.c
@@ -0,0 +1,128 @@
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt...)
+#endif
+
+void *module_alloc(unsigned long size)
+{
+ if (size == 0)
+ return NULL;
+ return vmalloc(size);
+}
+
+
+/* Free memory returned from module_alloc */
+void module_free(struct module *mod, void *module_region)
+{
+ vfree(module_region);
+ /* FIXME: If module_region == mod->init_region, trim exception
+ table entries. */
+}
+
+/* We don't need anything special. */
+int module_frob_arch_sections(Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs,
+ char *secstrings,
+ struct module *mod)
+{
+ return 0;
+}
+
+int apply_relocate(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ unsigned int i;
+ Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ uint32_t *location;
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rel[i].r_info);
+
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_68K_32:
+ /* We add the value into the location given */
+ *location += sym->st_value;
+ break;
+ case R_68K_PC32:
+			/* Add the value, subtract its position */
+ *location += sym->st_value - (uint32_t)location;
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+ me->name, ELF32_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
+
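+/*
+ * Worked example for R_68K_PC32 (illustrative numbers only): with
+ * addend -4, a symbol at 0x00200000 and a patch location at
+ * 0x00300000, the stored value is -4 + 0x00200000 - 0x00300000 =
+ * 0xffeffffc, i.e. the PC-relative displacement to the symbol.
+ */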
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ unsigned int i;
+ Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ uint32_t *location;
+
+ DEBUGP("Applying relocate_add section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rel[i].r_info);
+
+ switch (ELF32_R_TYPE(rel[i].r_info)) {
+ case R_68K_32:
+ /* We add the value into the location given */
+ *location = rel[i].r_addend + sym->st_value;
+ break;
+ case R_68K_PC32:
+			/* Add the value, subtract its position */
+ *location = rel[i].r_addend + sym->st_value - (uint32_t)location;
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %u\n",
+ me->name, ELF32_R_TYPE(rel[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+}
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
new file mode 100644
index 00000000000..93b043e2a43
--- /dev/null
+++ b/arch/m68k/kernel/process.c
@@ -0,0 +1,405 @@
+/*
+ * linux/arch/m68k/kernel/process.c
+ *
+ * Copyright (C) 1995 Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling..
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/reboot.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/traps.h>
+#include <asm/machdep.h>
+#include <asm/setup.h>
+#include <asm/pgtable.h>
+
+/*
+ * Initial task/thread structure. Make this a per-architecture thing,
+ * because different architectures tend to have different
+ * alignment requirements and potentially different initial
+ * setup.
+ */
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+
+EXPORT_SYMBOL(init_mm);
+
+union thread_union init_thread_union
+__attribute__((section(".data.init_task"), aligned(THREAD_SIZE)))
+ = { INIT_THREAD_INFO(init_task) };
+
+/* initial task structure */
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_task);
+
+asmlinkage void ret_from_fork(void);
+
+
+/*
+ * Return saved PC from a blocked thread
+ */
+unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+ struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
+ /* Check whether the thread is blocked in resume() */
+ if (in_sched_functions(sw->retpc))
+ return ((unsigned long *)sw->a6)[1];
+ else
+ return sw->retpc;
+}
+
+/*
+ * The idle loop on an m68k..
+ */
+void default_idle(void)
+{
+ if (!need_resched())
+#if defined(MACH_ATARI_ONLY) && !defined(CONFIG_HADES)
+ /* block out HSYNC on the atari (falcon) */
+ __asm__("stop #0x2200" : : : "cc");
+#else
+ __asm__("stop #0x2000" : : : "cc");
+#endif
+}
+
+void (*idle)(void) = default_idle;
+
+/*
+ * The idle thread. There's no useful work to be
+ * done, so just try to conserve power and have a
+ * low exit latency (ie sit in a loop waiting for
+ * somebody to say that they'd like to reschedule)
+ */
+void cpu_idle(void)
+{
+ /* endless idle loop with no priority at all */
+ while (1) {
+ while (!need_resched())
+ idle();
+ schedule();
+ }
+}
+
+void machine_restart(char * __unused)
+{
+ if (mach_reset)
+ mach_reset();
+ for (;;);
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+void machine_halt(void)
+{
+ if (mach_halt)
+ mach_halt();
+ for (;;);
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+void machine_power_off(void)
+{
+ if (mach_power_off)
+ mach_power_off();
+ for (;;);
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+void show_regs(struct pt_regs * regs)
+{
+ printk("\n");
+ printk("Format %02x Vector: %04x PC: %08lx Status: %04x %s\n",
+ regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
+ printk("ORIG_D0: %08lx D0: %08lx A2: %08lx A1: %08lx\n",
+ regs->orig_d0, regs->d0, regs->a2, regs->a1);
+ printk("A0: %08lx D5: %08lx D4: %08lx\n",
+ regs->a0, regs->d5, regs->d4);
+ printk("D3: %08lx D2: %08lx D1: %08lx\n",
+ regs->d3, regs->d2, regs->d1);
+ if (!(regs->sr & PS_S))
+ printk("USP: %08lx\n", rdusp());
+}
+
+/*
+ * Create a kernel thread
+ */
+int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
+{
+ int pid;
+ mm_segment_t fs;
+
+ fs = get_fs();
+ set_fs (KERNEL_DS);
+
+ {
+ register long retval __asm__ ("d0");
+ register long clone_arg __asm__ ("d1") = flags | CLONE_VM | CLONE_UNTRACED;
+
+ retval = __NR_clone;
+ __asm__ __volatile__
+ ("clrl %%d2\n\t"
+ "trap #0\n\t" /* Linux/m68k system call */
+ "tstl %0\n\t" /* child or parent */
+ "jne 1f\n\t" /* parent - jump */
+ "lea %%sp@(%c7),%6\n\t" /* reload current */
+ "movel %6@,%6\n\t"
+ "movel %3,%%sp@-\n\t" /* push argument */
+ "jsr %4@\n\t" /* call fn */
+ "movel %0,%%d1\n\t" /* pass exit value */
+ "movel %2,%%d0\n\t" /* exit */
+ "trap #0\n"
+ "1:"
+ : "+d" (retval)
+ : "i" (__NR_clone), "i" (__NR_exit),
+ "r" (arg), "a" (fn), "d" (clone_arg), "r" (current),
+ "i" (-THREAD_SIZE)
+ : "d2");
+
+ pid = retval;
+ }
+
+ set_fs (fs);
+ return pid;
+}
+
+void flush_thread(void)
+{
+ unsigned long zero = 0;
+ set_fs(USER_DS);
+ current->thread.fs = __USER_DS;
+ if (!FPU_IS_EMU)
+ asm volatile (".chip 68k/68881\n\t"
+ "frestore %0@\n\t"
+ ".chip 68k" : : "a" (&zero));
+}
+
+/*
+ * "m68k_fork()".. By the time we get here, the
+ * non-volatile registers have also been saved on the
+ * stack. We do some ugly pointer stuff here.. (see
+ * also copy_thread)
+ */
+
+asmlinkage int m68k_fork(struct pt_regs *regs)
+{
+ return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
+}
+
+asmlinkage int m68k_vfork(struct pt_regs *regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0,
+ NULL, NULL);
+}
+
+asmlinkage int m68k_clone(struct pt_regs *regs)
+{
+ unsigned long clone_flags;
+ unsigned long newsp;
+ int *parent_tidptr, *child_tidptr;
+
+ /* syscall2 puts clone_flags in d1 and usp in d2 */
+ clone_flags = regs->d1;
+ newsp = regs->d2;
+ parent_tidptr = (int *)regs->d3;
+ child_tidptr = (int *)regs->d4;
+ if (!newsp)
+ newsp = rdusp();
+ return do_fork(clone_flags, newsp, regs, 0,
+ parent_tidptr, child_tidptr);
+}
+
+int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+ unsigned long unused,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ struct pt_regs * childregs;
+ struct switch_stack * childstack, *stack;
+ unsigned long stack_offset, *retp;
+
+ stack_offset = THREAD_SIZE - sizeof(struct pt_regs);
+ childregs = (struct pt_regs *) ((unsigned long) (p->thread_info) + stack_offset);
+
+ *childregs = *regs;
+ childregs->d0 = 0;
+
+ retp = ((unsigned long *) regs);
+ stack = ((struct switch_stack *) retp) - 1;
+
+ childstack = ((struct switch_stack *) childregs) - 1;
+ *childstack = *stack;
+ childstack->retpc = (unsigned long)ret_from_fork;
+
+ p->thread.usp = usp;
+ p->thread.ksp = (unsigned long)childstack;
+ /*
+ * Must save the current SFC/DFC value, NOT the value when
+ * the parent was last descheduled - RGH 10-08-96
+ */
+ p->thread.fs = get_fs().seg;
+
+ if (!FPU_IS_EMU) {
+ /* Copy the current fpu state */
+ asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
+
+ if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2])
+ asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
+ "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
+ : : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
+ : "memory");
+ /* Restore the state in case the fpu was busy */
+ asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
+ }
+
+ return 0;
+}
+
+/* Fill in the fpu structure for a core dump. */
+
+int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
+{
+ char fpustate[216];
+
+ if (FPU_IS_EMU) {
+ int i;
+
+ memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
+ memcpy(fpu->fpregs, current->thread.fp, 96);
+ /* Convert internal fpu reg representation
+ * into long double format
+ */
+ for (i = 0; i < 24; i += 3)
+ fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
+ ((fpu->fpregs[i] & 0x0000ffff) << 16);
+ return 1;
+ }
+
+ /* First dump the fpu context to avoid protocol violation. */
+ asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
+ if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
+ return 0;
+
+ asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
+ :: "m" (fpu->fpcntl[0])
+ : "memory");
+ asm volatile ("fmovemx %/fp0-%/fp7,%0"
+ :: "m" (fpu->fpregs[0])
+ : "memory");
+ return 1;
+}
+
+/*
+ * fill in the user structure for a core dump..
+ */
+void dump_thread(struct pt_regs * regs, struct user * dump)
+{
+ struct switch_stack *sw;
+
+/* changed the size calculations - should hopefully work better. lbt */
+ dump->magic = CMAGIC;
+ dump->start_code = 0;
+ dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
+ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
+ dump->u_dsize = ((unsigned long) (current->mm->brk +
+ (PAGE_SIZE-1))) >> PAGE_SHIFT;
+ dump->u_dsize -= dump->u_tsize;
+ dump->u_ssize = 0;
+
+ if (dump->start_stack < TASK_SIZE)
+ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
+
+ dump->u_ar0 = (struct user_regs_struct *)((int)&dump->regs - (int)dump);
+ sw = ((struct switch_stack *)regs) - 1;
+ dump->regs.d1 = regs->d1;
+ dump->regs.d2 = regs->d2;
+ dump->regs.d3 = regs->d3;
+ dump->regs.d4 = regs->d4;
+ dump->regs.d5 = regs->d5;
+ dump->regs.d6 = sw->d6;
+ dump->regs.d7 = sw->d7;
+ dump->regs.a0 = regs->a0;
+ dump->regs.a1 = regs->a1;
+ dump->regs.a2 = regs->a2;
+ dump->regs.a3 = sw->a3;
+ dump->regs.a4 = sw->a4;
+ dump->regs.a5 = sw->a5;
+ dump->regs.a6 = sw->a6;
+ dump->regs.d0 = regs->d0;
+ dump->regs.orig_d0 = regs->orig_d0;
+ dump->regs.stkadj = regs->stkadj;
+ dump->regs.sr = regs->sr;
+ dump->regs.pc = regs->pc;
+ dump->regs.fmtvec = (regs->format << 12) | regs->vector;
+ /* dump floating point stuff */
+ dump->u_fpvalid = dump_fpu (regs, &dump->m68kfp);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int sys_execve(char *name, char **argv, char **envp)
+{
+ int error;
+ char * filename;
+ struct pt_regs *regs = (struct pt_regs *) &name;
+
+ lock_kernel();
+ filename = getname(name);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, argv, envp, regs);
+ putname(filename);
+out:
+ unlock_kernel();
+ return error;
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long fp, pc;
+ unsigned long stack_page;
+ int count = 0;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ stack_page = (unsigned long)(p->thread_info);
+ fp = ((struct switch_stack *)p->thread.ksp)->a6;
+ do {
+ if (fp < stack_page+sizeof(struct thread_info) ||
+ fp >= 8184+stack_page)
+ return 0;
+ pc = ((unsigned long *)fp)[1];
+ if (!in_sched_functions(pc))
+ return pc;
+ fp = *(unsigned long *) fp;
+ } while (count++ < 16);
+ return 0;
+}
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
new file mode 100644
index 00000000000..0beb53333ba
--- /dev/null
+++ b/arch/m68k/kernel/ptrace.c
@@ -0,0 +1,393 @@
+/*
+ * linux/arch/m68k/kernel/ptrace.c
+ *
+ * Copyright (C) 1994 by Hamish Macdonald
+ * Taken from linux/kernel/ptrace.c and modified for M680x0.
+ * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/config.h>
+
+#include <asm/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/processor.h>
+
+/*
+ * This does not yet catch signals sent when the child dies
+ * (in exit.c or in signal.c).
+ */
+
+/* determines which bits in the SR the user has access to. */
+/* 1 = access 0 = no access */
+#define SR_MASK 0x001f
+
+/* sets the trace bits. */
+#define TRACE_BITS 0x8000
+
+/* Find the stack offset for a register, relative to thread.esp0. */
+#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg)
+#define SW_REG(reg) ((long)&((struct switch_stack *)0)->reg \
+ - sizeof(struct switch_stack))
+/* Mapping from PT_xxx to the stack offset at which the register is
+ saved. Notice that usp has no stack-slot and needs to be treated
+ specially (see get_reg/put_reg below). */
+static int regoff[] = {
+ [0] = PT_REG(d1),
+ [1] = PT_REG(d2),
+ [2] = PT_REG(d3),
+ [3] = PT_REG(d4),
+ [4] = PT_REG(d5),
+ [5] = SW_REG(d6),
+ [6] = SW_REG(d7),
+ [7] = PT_REG(a0),
+ [8] = PT_REG(a1),
+ [9] = PT_REG(a2),
+ [10] = SW_REG(a3),
+ [11] = SW_REG(a4),
+ [12] = SW_REG(a5),
+ [13] = SW_REG(a6),
+ [14] = PT_REG(d0),
+ [15] = -1,
+ [16] = PT_REG(orig_d0),
+ [17] = PT_REG(sr),
+ [18] = PT_REG(pc),
+};
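+/*
+ * The indices above follow the PT_xxx numbering from <asm/ptrace.h>
+ * (PT_D1 == 0 through PT_PC == 18); entry 15 is -1 because PT_USP has
+ * no stack slot and is handled specially below.
+ */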
+
+/*
+ * Get contents of register REGNO in task TASK.
+ */
+static inline long get_reg(struct task_struct *task, int regno)
+{
+ unsigned long *addr;
+
+ if (regno == PT_USP)
+ addr = &task->thread.usp;
+ else if (regno < sizeof(regoff)/sizeof(regoff[0]))
+ addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
+ else
+ return 0;
+ return *addr;
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ */
+static inline int put_reg(struct task_struct *task, int regno,
+ unsigned long data)
+{
+ unsigned long *addr;
+
+ if (regno == PT_USP)
+ addr = &task->thread.usp;
+ else if (regno < sizeof(regoff)/sizeof(regoff[0]))
+ addr = (unsigned long *) (task->thread.esp0 + regoff[regno]);
+ else
+ return -1;
+ *addr = data;
+ return 0;
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure the single step bit is not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ unsigned long tmp;
+ /* make sure the single step bit is not set. */
+ tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16);
+ put_reg(child, PT_SR, tmp);
+ child->thread.work.delayed_trace = 0;
+ child->thread.work.syscall_trace = 0;
+}
+
+asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
+{
+ struct task_struct *child;
+ int ret;
+
+ lock_kernel();
+ ret = -EPERM;
+ if (request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->ptrace & PT_PTRACED)
+ goto out;
+ /* set the ptrace bit in the process flags. */
+ current->ptrace |= PT_PTRACED;
+ ret = 0;
+ goto out;
+ }
+ ret = -ESRCH;
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
+ read_unlock(&tasklist_lock);
+ if (!child)
+ goto out;
+
+ ret = -EPERM;
+ if (pid == 1) /* you may not mess with init */
+ goto out_tsk;
+
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+ }
+
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (ret < 0)
+ goto out_tsk;
+
+ switch (request) {
+ /* when I and D space are separate, these will need to be fixed. */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA: {
+ unsigned long tmp;
+ int copied;
+
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ ret = -EIO;
+ if (copied != sizeof(tmp))
+ break;
+ ret = put_user(tmp,(unsigned long *) data);
+ break;
+ }
+
+ /* read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ unsigned long tmp;
+
+ ret = -EIO;
+ if ((addr & 3) || addr < 0 ||
+ addr > sizeof(struct user) - 3)
+ break;
+
+ tmp = 0; /* Default return condition */
+ addr = addr >> 2; /* temporary hack. */
+ ret = -EIO;
+ if (addr < 19) {
+ tmp = get_reg(child, addr);
+ if (addr == PT_SR)
+ tmp >>= 16;
+ } else if (addr >= 21 && addr < 49) {
+ tmp = child->thread.fp[addr - 21];
+#ifdef CONFIG_M68KFPU_EMU
+ /* Convert internal fpu reg representation
+ * into long double format
+ */
+ if (FPU_IS_EMU && (addr < 45) && !(addr % 3))
+ tmp = ((tmp & 0xffff0000) << 15) |
+ ((tmp & 0x0000ffff) << 16);
+#endif
+ } else
+ break;
+ ret = put_user(tmp,(unsigned long *) data);
+ break;
+ }
+
+ /* when I and D space are separate, this will have to be fixed. */
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = 0;
+ if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
+ break;
+ ret = -EIO;
+ break;
+
+ case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
+ ret = -EIO;
+ if ((addr & 3) || addr < 0 ||
+ addr > sizeof(struct user) - 3)
+ break;
+
+ addr = addr >> 2; /* temporary hack. */
+
+ if (addr == PT_SR) {
+ data &= SR_MASK;
+ data <<= 16;
+ data |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
+ }
+ if (addr < 19) {
+ if (put_reg(child, addr, data))
+ break;
+ ret = 0;
+ break;
+ }
+ if (addr >= 21 && addr < 48)
+ {
+#ifdef CONFIG_M68KFPU_EMU
+ /* Convert long double format
+ * into internal fpu reg representation
+ */
+ if (FPU_IS_EMU && (addr < 45) && !(addr % 3)) {
+ data = (unsigned long)data << 15;
+ data = (data & 0xffff0000) |
+ ((data & 0x0000ffff) >> 1);
+ }
+#endif
+ child->thread.fp[addr - 21] = data;
+ ret = 0;
+ }
+ break;
+
+ case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
+ case PTRACE_CONT: { /* restart after signal. */
+ long tmp;
+
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ if (request == PTRACE_SYSCALL) {
+ child->thread.work.syscall_trace = ~0;
+ } else {
+ child->thread.work.syscall_trace = 0;
+ }
+ child->exit_code = data;
+ /* make sure the single step bit is not set. */
+ tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16);
+ put_reg(child, PT_SR, tmp);
+ child->thread.work.delayed_trace = 0;
+ wake_up_process(child);
+ ret = 0;
+ break;
+ }
+
+/*
+ * Make the child exit. The best I can do is send it a SIGKILL;
+ * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL: {
+ long tmp;
+
+ ret = 0;
+ if (child->exit_state == EXIT_ZOMBIE) /* already dead */
+ break;
+ child->exit_code = SIGKILL;
+ /* make sure the single step bit is not set. */
+ tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16);
+ put_reg(child, PT_SR, tmp);
+ child->thread.work.delayed_trace = 0;
+ wake_up_process(child);
+ break;
+ }
+
+ case PTRACE_SINGLESTEP: { /* set the trap flag. */
+ long tmp;
+
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ child->thread.work.syscall_trace = 0;
+ tmp = get_reg(child, PT_SR) | (TRACE_BITS << 16);
+ put_reg(child, PT_SR, tmp);
+ child->thread.work.delayed_trace = 1;
+
+ child->exit_code = data;
+ /* give it a chance to run. */
+ wake_up_process(child);
+ ret = 0;
+ break;
+ }
+
+ case PTRACE_DETACH: /* detach a process that was attached. */
+ ret = ptrace_detach(child, data);
+ break;
+
+ case PTRACE_GETREGS: { /* Get all gp regs from the child. */
+ int i;
+ unsigned long tmp;
+ for (i = 0; i < 19; i++) {
+ tmp = get_reg(child, i);
+ if (i == PT_SR)
+ tmp >>= 16;
+ if (put_user(tmp, (unsigned long *) data)) {
+ ret = -EFAULT;
+ break;
+ }
+ data += sizeof(long);
+ }
+ ret = 0;
+ break;
+ }
+
+ case PTRACE_SETREGS: { /* Set all gp regs in the child. */
+ int i;
+ unsigned long tmp;
+ for (i = 0; i < 19; i++) {
+ if (get_user(tmp, (unsigned long *) data)) {
+ ret = -EFAULT;
+ break;
+ }
+ if (i == PT_SR) {
+ tmp &= SR_MASK;
+ tmp <<= 16;
+ tmp |= get_reg(child, PT_SR) & ~(SR_MASK << 16);
+ }
+ put_reg(child, i, tmp);
+ data += sizeof(long);
+ }
+ ret = 0;
+ break;
+ }
+
+ case PTRACE_GETFPREGS: { /* Get the child FPU state. */
+ ret = 0;
+ if (copy_to_user((void *)data, &child->thread.fp,
+ sizeof(struct user_m68kfp_struct)))
+ ret = -EFAULT;
+ break;
+ }
+
+ case PTRACE_SETFPREGS: { /* Set the child FPU state. */
+ ret = 0;
+ if (copy_from_user(&child->thread.fp, (void *)data,
+ sizeof(struct user_m68kfp_struct)))
+ ret = -EFAULT;
+ break;
+ }
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+out_tsk:
+ put_task_struct(child);
+out:
+ unlock_kernel();
+ return ret;
+}
+
+asmlinkage void syscall_trace(void)
+{
+ if (!current->thread.work.delayed_trace &&
+ !current->thread.work.syscall_trace)
+ return;
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0));
+ /*
+ * this isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
diff --git a/arch/m68k/kernel/semaphore.c b/arch/m68k/kernel/semaphore.c
new file mode 100644
index 00000000000..1ebb79baaa8
--- /dev/null
+++ b/arch/m68k/kernel/semaphore.c
@@ -0,0 +1,133 @@
+/*
+ * Generic semaphore code. Buyer beware. Do your own
+ * specific changes in <asm/semaphore-helper.h>
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <asm/semaphore-helper.h>
+
+#ifndef CONFIG_RMW_INSNS
+spinlock_t semaphore_wake_lock;
+#endif
+
+/*
+ * Semaphores are implemented using a two-way counter:
+ * The "count" variable is decremented for each process
+ * that tries to sleep, while the "waking" variable is
+ * incremented when the "up()" code goes to wake up waiting
+ * processes.
+ *
+ * Notably, the inline "up()" and "down()" functions can
+ * efficiently test if they need to do any extra work (up
+ * needs to do something only if count was negative before
+ * the increment operation).
+ *
+ * waking_non_zero() (from asm/semaphore.h) must execute
+ * atomically.
+ *
+ * When __up() is called, the count was negative before
+ * incrementing it, and we need to wake up somebody.
+ *
+ * This routine adds one to the count of processes that need to
+ * wake up and exit. ALL waiting processes actually wake up but
+ * only the one that gets to the "waking" field first will gate
+ * through and acquire the semaphore. The others will go back
+ * to sleep.
+ *
+ * Note that these functions are only called when there is
+ * contention on the lock, and as such all this is the
+ * "non-critical" part of the whole semaphore business. The
+ * critical part is the inline stuff in <asm/semaphore.h>
+ * where we want to avoid any extra jumps and calls.
+ */
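+/*
+ * Illustrative sequence (sketch only, using the helpers described
+ * above):
+ *
+ *	down():  count 1 -> 0    uncontended, fast path only
+ *	down():  count 0 -> -1   caller sleeps in __down()
+ *	up():    count -1 -> 0   __up() bumps "waking" and wakes the
+ *	                         queue; the first sleeper to pass
+ *	                         waking_non_zero() gets the semaphore,
+ *	                         the rest go back to sleep.
+ */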
+void __up(struct semaphore *sem)
+{
+ wake_one_more(sem);
+ wake_up(&sem->wait);
+}
+
+/*
+ * Perform the "down" function. Return zero for semaphore acquired,
+ * return negative for signalled out of the function.
+ *
+ * If called from __down, the return is ignored and the wait loop is
+ * not interruptible. This means that a task waiting on a semaphore
+ * using "down()" cannot be killed until someone does an "up()" on
+ * the semaphore.
+ *
+ * If called from __down_interruptible, the return value gets checked
+ * upon return. If the return value is negative then the task continues
+ * with the negative value in the return register (it can be tested by
+ * the caller).
+ *
+ * Either form may be used in conjunction with "up()".
+ *
+ */
+
+
+#define DOWN_HEAD(task_state) \
+ \
+ \
+ current->state = (task_state); \
+ add_wait_queue(&sem->wait, &wait); \
+ \
+ /* \
+ * Ok, we're set up. sem->count is known to be less than zero \
+ * so we must wait. \
+ * \
+ * We can let go the lock for purposes of waiting. \
+ * We re-acquire it after awaking so as to protect \
+ * all semaphore operations. \
+ * \
+ * If "up()" is called before we call waking_non_zero() then \
+ * we will catch it right away. If it is called later then \
+ * we will have to go through a wakeup cycle to catch it. \
+ * \
+ * Multiple waiters contend for the semaphore lock to see \
+ * who gets to gate through and who has to wait some more. \
+ */ \
+ for (;;) {
+
+#define DOWN_TAIL(task_state) \
+ current->state = (task_state); \
+ } \
+ current->state = TASK_RUNNING; \
+ remove_wait_queue(&sem->wait, &wait);
+
+void __sched __down(struct semaphore * sem)
+{
+ DECLARE_WAITQUEUE(wait, current);
+
+ DOWN_HEAD(TASK_UNINTERRUPTIBLE)
+ if (waking_non_zero(sem))
+ break;
+ schedule();
+ DOWN_TAIL(TASK_UNINTERRUPTIBLE)
+}
+
+int __sched __down_interruptible(struct semaphore * sem)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
+
+ DOWN_HEAD(TASK_INTERRUPTIBLE)
+
+ ret = waking_non_zero_interruptible(sem, current);
+ if (ret)
+ {
+ if (ret == 1)
+ /* ret != 0 only if we get interrupted -arca */
+ ret = 0;
+ break;
+ }
+ schedule();
+ DOWN_TAIL(TASK_INTERRUPTIBLE)
+ return ret;
+}
+
+int __down_trylock(struct semaphore * sem)
+{
+ return waking_non_zero_trylock(sem);
+}
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
new file mode 100644
index 00000000000..d6ca99242e5
--- /dev/null
+++ b/arch/m68k/kernel/setup.c
@@ -0,0 +1,545 @@
+/*
+ * linux/arch/m68k/kernel/setup.c
+ *
+ * Copyright (C) 1995 Hamish Macdonald
+ */
+
+/*
+ * This file handles the architecture-dependent parts of system setup
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/console.h>
+#include <linux/genhd.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <linux/initrd.h>
+
+#include <asm/bootinfo.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#ifdef CONFIG_AMIGA
+#include <asm/amigahw.h>
+#endif
+#ifdef CONFIG_ATARI
+#include <asm/atarihw.h>
+#include <asm/atari_stram.h>
+#endif
+#ifdef CONFIG_SUN3X
+#include <asm/dvma.h>
+#endif
+
+unsigned long m68k_machtype;
+unsigned long m68k_cputype;
+unsigned long m68k_fputype;
+unsigned long m68k_mmutype;
+#ifdef CONFIG_VME
+unsigned long vme_brdtype;
+#endif
+
+int m68k_is040or060;
+
+extern int end;
+extern unsigned long availmem;
+
+int m68k_num_memory;
+int m68k_realnum_memory;
+unsigned long m68k_memoffset;
+struct mem_info m68k_memory[NUM_MEMINFO];
+
+static struct mem_info m68k_ramdisk;
+
+static char m68k_command_line[CL_SIZE];
+
+char m68k_debug_device[6] = "";
+
+void (*mach_sched_init) (irqreturn_t (*handler)(int, void *, struct pt_regs *)) __initdata = NULL;
+/* machine dependent irq functions */
+void (*mach_init_IRQ) (void) __initdata = NULL;
+irqreturn_t (*(*mach_default_handler)[]) (int, void *, struct pt_regs *);
+void (*mach_get_model) (char *model);
+int (*mach_get_hardware_list) (char *buffer);
+int (*mach_get_irq_list) (struct seq_file *, void *);
+irqreturn_t (*mach_process_int) (int, struct pt_regs *);
+/* machine dependent timer functions */
+unsigned long (*mach_gettimeoffset) (void);
+int (*mach_hwclk) (int, struct rtc_time*);
+int (*mach_set_clock_mmss) (unsigned long);
+unsigned int (*mach_get_ss)(void);
+int (*mach_get_rtc_pll)(struct rtc_pll_info *);
+int (*mach_set_rtc_pll)(struct rtc_pll_info *);
+void (*mach_reset)( void );
+void (*mach_halt)( void );
+void (*mach_power_off)( void );
+long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
+#if defined(CONFIG_AMIGA_FLOPPY) || defined(CONFIG_ATARI_FLOPPY)
+void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
+#endif
+#ifdef CONFIG_HEARTBEAT
+void (*mach_heartbeat) (int);
+EXPORT_SYMBOL(mach_heartbeat);
+#endif
+#ifdef CONFIG_M68K_L2_CACHE
+void (*mach_l2_flush) (int);
+#endif
+#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
+void (*mach_beep)(unsigned int, unsigned int);
+#endif
+#if defined(CONFIG_ISA) && defined(MULTI_ISA)
+int isa_type;
+int isa_sex;
+#endif
+
+extern int amiga_parse_bootinfo(const struct bi_record *);
+extern int atari_parse_bootinfo(const struct bi_record *);
+extern int mac_parse_bootinfo(const struct bi_record *);
+extern int q40_parse_bootinfo(const struct bi_record *);
+extern int bvme6000_parse_bootinfo(const struct bi_record *);
+extern int mvme16x_parse_bootinfo(const struct bi_record *);
+extern int mvme147_parse_bootinfo(const struct bi_record *);
+extern int hp300_parse_bootinfo(const struct bi_record *);
+
+extern void config_amiga(void);
+extern void config_atari(void);
+extern void config_mac(void);
+extern void config_sun3(void);
+extern void config_apollo(void);
+extern void config_mvme147(void);
+extern void config_mvme16x(void);
+extern void config_bvme6000(void);
+extern void config_hp300(void);
+extern void config_q40(void);
+extern void config_sun3x(void);
+
+extern void mac_debugging_short (int, short);
+extern void mac_debugging_long (int, long);
+
+#define MASK_256K 0xfffc0000
+
+extern void paging_init(void);
+
+static void __init m68k_parse_bootinfo(const struct bi_record *record)
+{
+ while (record->tag != BI_LAST) {
+ int unknown = 0;
+ const unsigned long *data = record->data;
+ switch (record->tag) {
+ case BI_MACHTYPE:
+ case BI_CPUTYPE:
+ case BI_FPUTYPE:
+ case BI_MMUTYPE:
+ /* Already set up by head.S */
+ break;
+
+ case BI_MEMCHUNK:
+ if (m68k_num_memory < NUM_MEMINFO) {
+ m68k_memory[m68k_num_memory].addr = data[0];
+ m68k_memory[m68k_num_memory].size = data[1];
+ m68k_num_memory++;
+ } else
+ printk("m68k_parse_bootinfo: too many memory chunks\n");
+ break;
+
+ case BI_RAMDISK:
+ m68k_ramdisk.addr = data[0];
+ m68k_ramdisk.size = data[1];
+ break;
+
+ case BI_COMMAND_LINE:
+ strlcpy(m68k_command_line, (const char *)data, sizeof(m68k_command_line));
+ break;
+
+ default:
+ if (MACH_IS_AMIGA)
+ unknown = amiga_parse_bootinfo(record);
+ else if (MACH_IS_ATARI)
+ unknown = atari_parse_bootinfo(record);
+ else if (MACH_IS_MAC)
+ unknown = mac_parse_bootinfo(record);
+ else if (MACH_IS_Q40)
+ unknown = q40_parse_bootinfo(record);
+ else if (MACH_IS_BVME6000)
+ unknown = bvme6000_parse_bootinfo(record);
+ else if (MACH_IS_MVME16x)
+ unknown = mvme16x_parse_bootinfo(record);
+ else if (MACH_IS_MVME147)
+ unknown = mvme147_parse_bootinfo(record);
+ else if (MACH_IS_HP300)
+ unknown = hp300_parse_bootinfo(record);
+ else
+ unknown = 1;
+ }
+ if (unknown)
+ printk("m68k_parse_bootinfo: unknown tag 0x%04x ignored\n",
+ record->tag);
+ record = (struct bi_record *)((unsigned long)record+record->size);
+ }
+
+ m68k_realnum_memory = m68k_num_memory;
+#ifdef CONFIG_SINGLE_MEMORY_CHUNK
+ if (m68k_num_memory > 1) {
+ printk("Ignoring last %i chunks of physical memory\n",
+ (m68k_num_memory - 1));
+ m68k_num_memory = 1;
+ }
+ m68k_memoffset = m68k_memory[0].addr-PAGE_OFFSET;
+#endif
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ extern int _etext, _edata, _end;
+#ifndef CONFIG_SUN3
+ unsigned long endmem, startmem;
+#endif
+ int i;
+ char *p, *q;
+
+ /* The bootinfo is located right after the kernel bss */
+ m68k_parse_bootinfo((const struct bi_record *)&_end);
+
+ if (CPU_IS_040)
+ m68k_is040or060 = 4;
+ else if (CPU_IS_060)
+ m68k_is040or060 = 6;
+
+ /* FIXME: m68k_fputype is passed in by Penguin booter, which can
+ * be confused by software FPU emulation. BEWARE.
+ * We should really do our own FPU check at startup.
+ * [what do we do with buggy 68LC040s? if we have problems
+ * with them, we should add a test to check_bugs() below] */
+#ifndef CONFIG_M68KFPU_EMU_ONLY
+ /* clear the fpu if we have one */
+ if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) {
+ volatile int zero = 0;
+ asm __volatile__ ("frestore %0" : : "m" (zero));
+ }
+#endif
+
+ if (CPU_IS_060) {
+ u32 pcr;
+
+ asm (".chip 68060; movec %%pcr,%0; .chip 68k"
+ : "=d" (pcr));
+ if (((pcr >> 8) & 0xff) <= 5) {
+ printk("Enabling workaround for errata I14\n");
+ asm (".chip 68060; movec %0,%%pcr; .chip 68k"
+ : : "d" (pcr | 0x20));
+ }
+ }
+
+ init_mm.start_code = PAGE_OFFSET;
+ init_mm.end_code = (unsigned long) &_etext;
+ init_mm.end_data = (unsigned long) &_edata;
+ init_mm.brk = (unsigned long) &_end;
+
+ *cmdline_p = m68k_command_line;
+ memcpy(saved_command_line, *cmdline_p, CL_SIZE);
+
+ /* Parse the command line for arch-specific options.
+ * For the m68k, this is currently only "debug=xxx" to enable printing
+ * certain kernel messages to some machine-specific device.
+ */
+ for( p = *cmdline_p; p && *p; ) {
+ i = 0;
+ if (!strncmp( p, "debug=", 6 )) {
+ strlcpy( m68k_debug_device, p+6, sizeof(m68k_debug_device) );
+ if ((q = strchr( m68k_debug_device, ' ' ))) *q = 0;
+ i = 1;
+ }
+#ifdef CONFIG_ATARI
+ /* This option must be parsed very early */
+ if (!strncmp( p, "switches=", 9 )) {
+ extern void atari_switches_setup( const char *, int );
+ atari_switches_setup( p+9, (q = strchr( p+9, ' ' )) ?
+ (q - (p+9)) : strlen(p+9) );
+ i = 1;
+ }
+#endif
+
+ if (i) {
+ /* option processed, delete it */
+ if ((q = strchr( p, ' ' )))
+ strcpy( p, q+1 );
+ else
+ *p = 0;
+ } else {
+ if ((p = strchr( p, ' ' ))) ++p;
+ }
+ }
+
+ switch (m68k_machtype) {
+#ifdef CONFIG_AMIGA
+ case MACH_AMIGA:
+ config_amiga();
+ break;
+#endif
+#ifdef CONFIG_ATARI
+ case MACH_ATARI:
+ config_atari();
+ break;
+#endif
+#ifdef CONFIG_MAC
+ case MACH_MAC:
+ config_mac();
+ break;
+#endif
+#ifdef CONFIG_SUN3
+ case MACH_SUN3:
+ config_sun3();
+ break;
+#endif
+#ifdef CONFIG_APOLLO
+ case MACH_APOLLO:
+ config_apollo();
+ break;
+#endif
+#ifdef CONFIG_MVME147
+ case MACH_MVME147:
+ config_mvme147();
+ break;
+#endif
+#ifdef CONFIG_MVME16x
+ case MACH_MVME16x:
+ config_mvme16x();
+ break;
+#endif
+#ifdef CONFIG_BVME6000
+ case MACH_BVME6000:
+ config_bvme6000();
+ break;
+#endif
+#ifdef CONFIG_HP300
+ case MACH_HP300:
+ config_hp300();
+ break;
+#endif
+#ifdef CONFIG_Q40
+ case MACH_Q40:
+ config_q40();
+ break;
+#endif
+#ifdef CONFIG_SUN3X
+ case MACH_SUN3X:
+ config_sun3x();
+ break;
+#endif
+ default:
+ panic ("No configuration setup");
+ }
+
+#ifndef CONFIG_SUN3
+	startmem = m68k_memory[0].addr;
+ endmem = startmem + m68k_memory[0].size;
+ high_memory = (void *)PAGE_OFFSET;
+ for (i = 0; i < m68k_num_memory; i++) {
+ m68k_memory[i].size &= MASK_256K;
+ if (m68k_memory[i].addr < startmem)
+ startmem = m68k_memory[i].addr;
+ if (m68k_memory[i].addr+m68k_memory[i].size > endmem)
+ endmem = m68k_memory[i].addr+m68k_memory[i].size;
+ high_memory += m68k_memory[i].size;
+ }
+
+ availmem += init_bootmem_node(NODE_DATA(0), availmem >> PAGE_SHIFT,
+ startmem >> PAGE_SHIFT, endmem >> PAGE_SHIFT);
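+	/*
+	 * A note on the arithmetic above: init_bootmem_node() returns the
+	 * size in bytes of the bootmem bitmap it placed at availmem, so
+	 * bumping availmem by that amount lets the reserve_bootmem() call
+	 * below cover the bitmap as well as the kernel image.
+	 */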
+
+ for (i = 0; i < m68k_num_memory; i++)
+ free_bootmem(m68k_memory[i].addr, m68k_memory[i].size);
+
+ reserve_bootmem(m68k_memory[0].addr, availmem - m68k_memory[0].addr);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (m68k_ramdisk.size) {
+ reserve_bootmem(m68k_ramdisk.addr, m68k_ramdisk.size);
+ initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
+ initrd_end = initrd_start + m68k_ramdisk.size;
+ printk ("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
+ }
+#endif
+
+#ifdef CONFIG_ATARI
+ if (MACH_IS_ATARI)
+ atari_stram_reserve_pages((void *)availmem);
+#endif
+#ifdef CONFIG_SUN3X
+ if (MACH_IS_SUN3X) {
+ dvma_init();
+ }
+#endif
+
+#endif /* !CONFIG_SUN3 */
+
+ paging_init();
+
+/* set ISA defs early as possible */
+#if defined(CONFIG_ISA) && defined(MULTI_ISA)
+#if defined(CONFIG_Q40)
+ if (MACH_IS_Q40) {
+ isa_type = Q40_ISA;
+ isa_sex = 0;
+ }
+#elif defined(CONFIG_GG2)
+ if (MACH_IS_AMIGA && AMIGAHW_PRESENT(GG2_ISA)){
+ isa_type = GG2_ISA;
+ isa_sex = 0;
+ }
+#elif defined(CONFIG_AMIGA_PCMCIA)
+ if (MACH_IS_AMIGA && AMIGAHW_PRESENT(PCMCIA)){
+ isa_type = AG_ISA;
+ isa_sex = 1;
+ }
+#endif
+#endif
+}
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ const char *cpu, *mmu, *fpu;
+ unsigned long clockfreq, clockfactor;
+
+#define LOOP_CYCLES_68020 (8)
+#define LOOP_CYCLES_68030 (8)
+#define LOOP_CYCLES_68040 (3)
+#define LOOP_CYCLES_68060 (1)
+
+ if (CPU_IS_020) {
+ cpu = "68020";
+ clockfactor = LOOP_CYCLES_68020;
+ } else if (CPU_IS_030) {
+ cpu = "68030";
+ clockfactor = LOOP_CYCLES_68030;
+ } else if (CPU_IS_040) {
+ cpu = "68040";
+ clockfactor = LOOP_CYCLES_68040;
+ } else if (CPU_IS_060) {
+ cpu = "68060";
+ clockfactor = LOOP_CYCLES_68060;
+ } else {
+ cpu = "680x0";
+ clockfactor = 0;
+ }
+
+#ifdef CONFIG_M68KFPU_EMU_ONLY
+ fpu="none(soft float)";
+#else
+ if (m68k_fputype & FPU_68881)
+ fpu = "68881";
+ else if (m68k_fputype & FPU_68882)
+ fpu = "68882";
+ else if (m68k_fputype & FPU_68040)
+ fpu = "68040";
+ else if (m68k_fputype & FPU_68060)
+ fpu = "68060";
+ else if (m68k_fputype & FPU_SUNFPA)
+ fpu = "Sun FPA";
+ else
+ fpu = "none";
+#endif
+
+ if (m68k_mmutype & MMU_68851)
+ mmu = "68851";
+ else if (m68k_mmutype & MMU_68030)
+ mmu = "68030";
+ else if (m68k_mmutype & MMU_68040)
+ mmu = "68040";
+ else if (m68k_mmutype & MMU_68060)
+ mmu = "68060";
+ else if (m68k_mmutype & MMU_SUN3)
+ mmu = "Sun-3";
+ else if (m68k_mmutype & MMU_APOLLO)
+ mmu = "Apollo";
+ else
+ mmu = "unknown";
+
+ clockfreq = loops_per_jiffy*HZ*clockfactor;
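+	/*
+	 * Rough sanity check with illustrative numbers (not taken from any
+	 * particular machine): with HZ=100, a 25 MHz 68040 calibrating to
+	 * loops_per_jiffy of about 83333 gives clockfreq = 83333*100*3,
+	 * i.e. roughly 25 MHz, and BogoMips = 83333/(500000/100) = 16.66.
+	 */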
+
+ seq_printf(m, "CPU:\t\t%s\n"
+ "MMU:\t\t%s\n"
+ "FPU:\t\t%s\n"
+ "Clocking:\t%lu.%1luMHz\n"
+ "BogoMips:\t%lu.%02lu\n"
+ "Calibration:\t%lu loops\n",
+ cpu, mmu, fpu,
+ clockfreq/1000000,(clockfreq/100000)%10,
+ loops_per_jiffy/(500000/HZ),(loops_per_jiffy/(5000/HZ))%100,
+ loops_per_jiffy);
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ return *pos < 1 ? (void *)1 : NULL;
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return NULL;
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
+
+int get_hardware_list(char *buffer)
+{
+ int len = 0;
+ char model[80];
+ unsigned long mem;
+ int i;
+
+ if (mach_get_model)
+ mach_get_model(model);
+ else
+ strcpy(model, "Unknown m68k");
+
+ len += sprintf(buffer+len, "Model:\t\t%s\n", model);
+ for (mem = 0, i = 0; i < m68k_num_memory; i++)
+ mem += m68k_memory[i].size;
+ len += sprintf(buffer+len, "System Memory:\t%ldK\n", mem>>10);
+
+ if (mach_get_hardware_list)
+ len += mach_get_hardware_list(buffer+len);
+
+ return(len);
+}
+
+
+#if defined(CONFIG_AMIGA_FLOPPY) || defined(CONFIG_ATARI_FLOPPY)
+void __init floppy_setup(char *str, int *ints)
+{
+ if (mach_floppy_setup)
+ mach_floppy_setup (str, ints);
+}
+
+#endif
+
+void check_bugs(void)
+{
+#ifndef CONFIG_M68KFPU_EMU
+ if (m68k_fputype == 0) {
+ printk( KERN_EMERG "*** YOU DO NOT HAVE A FLOATING POINT UNIT, "
+ "WHICH IS REQUIRED BY LINUX/M68K ***\n" );
+ printk( KERN_EMERG "Upgrade your hardware or join the FPU "
+ "emulation project\n" );
+ panic( "no FPU" );
+ }
+#endif /* !CONFIG_M68KFPU_EMU */
+}
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
new file mode 100644
index 00000000000..9c636a4c238
--- /dev/null
+++ b/arch/m68k/kernel/signal.c
@@ -0,0 +1,1025 @@
+/*
+ * linux/arch/m68k/kernel/signal.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Linux/m68k support by Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
+ *
+ * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab
+ *
+ * mathemu support by Roman Zippel
+ * (Note: fpstate in the signal context is completely ignored for the emulator
+ * and the internal floating point format is put on stack)
+ */
+
+/*
+ * ++roman (07/09/96): implemented signal stacks (especially for tosemu on
+ * Atari :-) Current limitation: Only one sigstack can be active at one time.
+ * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
+ * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
+ * signal handlers!
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/highuid.h>
+#include <linux/personality.h>
+#include <linux/tty.h>
+#include <linux/binfmts.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/traps.h>
+#include <asm/ucontext.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
+
+const int frame_extra_sizes[16] = {
+ [1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */
+ [2] = sizeof(((struct frame *)0)->un.fmt2),
+ [3] = sizeof(((struct frame *)0)->un.fmt3),
+ [4] = sizeof(((struct frame *)0)->un.fmt4),
+ [5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */
+ [6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */
+ [7] = sizeof(((struct frame *)0)->un.fmt7),
+ [8] = -1, /* sizeof(((struct frame *)0)->un.fmt8), */
+ [9] = sizeof(((struct frame *)0)->un.fmt9),
+ [10] = sizeof(((struct frame *)0)->un.fmta),
+ [11] = sizeof(((struct frame *)0)->un.fmtb),
+ [12] = -1, /* sizeof(((struct frame *)0)->un.fmtc), */
+ [13] = -1, /* sizeof(((struct frame *)0)->un.fmtd), */
+ [14] = -1, /* sizeof(((struct frame *)0)->un.fmte), */
+ [15] = -1, /* sizeof(((struct frame *)0)->un.fmtf), */
+};
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+asmlinkage int do_sigsuspend(struct pt_regs *regs)
+{
+ old_sigset_t mask = regs->d3;
+ sigset_t saveset;
+
+ mask &= _BLOCKABLE;
+ saveset = current->blocked;
+ siginitset(&current->blocked, mask);
+ recalc_sigpending();
+
+ regs->d0 = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+do_rt_sigsuspend(struct pt_regs *regs)
+{
+ sigset_t *unewset = (sigset_t *)regs->d1;
+ size_t sigsetsize = (size_t)regs->d2;
+ sigset_t saveset, newset;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(sigset_t))
+ return -EINVAL;
+
+ if (copy_from_user(&newset, unewset, sizeof(newset)))
+ return -EFAULT;
+ sigdelsetmask(&newset, ~_BLOCKABLE);
+
+ saveset = current->blocked;
+ current->blocked = newset;
+ recalc_sigpending();
+
+ regs->d0 = -EINTR;
+ while (1) {
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ if (do_signal(&saveset, regs))
+ return -EINTR;
+ }
+}
+
+asmlinkage int
+sys_sigaction(int sig, const struct old_sigaction *act,
+ struct old_sigaction *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ if (act) {
+ old_sigset_t mask;
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+ __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+ return -EFAULT;
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ __get_user(mask, &act->sa_mask);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+ __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+ return -EFAULT;
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
+ }
+
+ return ret;
+}
+
+asmlinkage int
+sys_sigaltstack(const stack_t *uss, stack_t *uoss)
+{
+ return do_sigaltstack(uss, uoss, rdusp());
+}
+
+
+/*
+ * Do a signal return; undo the signal stack.
+ *
+ * Keep the return code on the stack quadword aligned!
+ * That makes the cache flush below easier.
+ */
+
+struct sigframe
+{
+ char *pretcode;
+ int sig;
+ int code;
+ struct sigcontext *psc;
+ char retcode[8];
+ unsigned long extramask[_NSIG_WORDS-1];
+ struct sigcontext sc;
+};
+
+struct rt_sigframe
+{
+ char *pretcode;
+ int sig;
+ struct siginfo *pinfo;
+ void *puc;
+ char retcode[8];
+ struct siginfo info;
+ struct ucontext uc;
+};
+
+
+static unsigned char fpu_version; /* version number of fpu, set by setup_frame */
+
+static inline int restore_fpu_state(struct sigcontext *sc)
+{
+ int err = 1;
+
+ if (FPU_IS_EMU) {
+ /* restore registers */
+ memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
+ memcpy(current->thread.fp, sc->sc_fpregs, 24);
+ return 0;
+ }
+
+ if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+ /* Verify the frame format. */
+ if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
+ goto out;
+ if (CPU_IS_020_OR_030) {
+ if (m68k_fputype & FPU_68881 &&
+ !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
+ goto out;
+ if (m68k_fputype & FPU_68882 &&
+ !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
+ goto out;
+ } else if (CPU_IS_040) {
+ if (!(sc->sc_fpstate[1] == 0x00 ||
+ sc->sc_fpstate[1] == 0x28 ||
+ sc->sc_fpstate[1] == 0x60))
+ goto out;
+ } else if (CPU_IS_060) {
+ if (!(sc->sc_fpstate[3] == 0x00 ||
+ sc->sc_fpstate[3] == 0x60 ||
+ sc->sc_fpstate[3] == 0xe0))
+ goto out;
+ } else
+ goto out;
+
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "fmovemx %0,%%fp0-%%fp1\n\t"
+ "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+ ".chip 68k"
+ : /* no outputs */
+ : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
+ }
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "frestore %0\n\t"
+ ".chip 68k" : : "m" (*sc->sc_fpstate));
+ err = 0;
+
+out:
+ return err;
+}
+
+#define FPCONTEXT_SIZE 216
+#define uc_fpstate uc_filler[0]
+#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4]
+#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1]
+
+static inline int rt_restore_fpu_state(struct ucontext *uc)
+{
+ unsigned char fpstate[FPCONTEXT_SIZE];
+ int context_size = CPU_IS_060 ? 8 : 0;
+ fpregset_t fpregs;
+ int err = 1;
+
+ if (FPU_IS_EMU) {
+ /* restore fpu control register */
+ if (__copy_from_user(current->thread.fpcntl,
+ uc->uc_mcontext.fpregs.f_fpcntl, 12))
+ goto out;
+ /* restore all other fpu register */
+ if (__copy_from_user(current->thread.fp,
+ uc->uc_mcontext.fpregs.f_fpregs, 96))
+ goto out;
+ return 0;
+ }
+
+ if (__get_user(*(long *)fpstate, (long *)&uc->uc_fpstate))
+ goto out;
+ if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
+ if (!CPU_IS_060)
+ context_size = fpstate[1];
+ /* Verify the frame format. */
+ if (!CPU_IS_060 && (fpstate[0] != fpu_version))
+ goto out;
+ if (CPU_IS_020_OR_030) {
+ if (m68k_fputype & FPU_68881 &&
+ !(context_size == 0x18 || context_size == 0xb4))
+ goto out;
+ if (m68k_fputype & FPU_68882 &&
+ !(context_size == 0x38 || context_size == 0xd4))
+ goto out;
+ } else if (CPU_IS_040) {
+ if (!(context_size == 0x00 ||
+ context_size == 0x28 ||
+ context_size == 0x60))
+ goto out;
+ } else if (CPU_IS_060) {
+ if (!(fpstate[3] == 0x00 ||
+ fpstate[3] == 0x60 ||
+ fpstate[3] == 0xe0))
+ goto out;
+ } else
+ goto out;
+ if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
+ sizeof(fpregs)))
+ goto out;
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "fmovemx %0,%%fp0-%%fp7\n\t"
+ "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+ ".chip 68k"
+ : /* no outputs */
+ : "m" (*fpregs.f_fpregs),
+ "m" (*fpregs.f_fpcntl));
+ }
+ if (context_size &&
+ __copy_from_user(fpstate + 4, (long *)&uc->uc_fpstate + 1,
+ context_size))
+ goto out;
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "frestore %0\n\t"
+ ".chip 68k" : : "m" (*fpstate));
+ err = 0;
+
+out:
+ return err;
+}
+
+static inline int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext *usc, void *fp,
+ int *pd0)
+{
+ int fsize, formatvec;
+ struct sigcontext context;
+ int err;
+
+ /* get previous context */
+ if (copy_from_user(&context, usc, sizeof(context)))
+ goto badframe;
+
+ /* restore passed registers */
+ regs->d1 = context.sc_d1;
+ regs->a0 = context.sc_a0;
+ regs->a1 = context.sc_a1;
+ regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
+ regs->pc = context.sc_pc;
+ regs->orig_d0 = -1; /* disable syscall checks */
+ wrusp(context.sc_usp);
+ formatvec = context.sc_formatvec;
+ regs->format = formatvec >> 12;
+ regs->vector = formatvec & 0xfff;
+
+ err = restore_fpu_state(&context);
+
+ fsize = frame_extra_sizes[regs->format];
+ if (fsize < 0) {
+ /*
+ * user process trying to return with weird frame format
+ */
+#ifdef DEBUG
+ printk("user process returning with weird frame format\n");
+#endif
+ goto badframe;
+ }
+
+ /* OK. Make room on the supervisor stack for the extra junk,
+ * if necessary.
+ */
+
+ if (fsize) {
+ struct switch_stack *sw = (struct switch_stack *)regs - 1;
+ regs->d0 = context.sc_d0;
+#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
+ __asm__ __volatile__
+ (" movel %0,%/a0\n\t"
+ " subl %1,%/a0\n\t" /* make room on stack */
+ " movel %/a0,%/sp\n\t" /* set stack pointer */
+ /* move switch_stack and pt_regs */
+ "1: movel %0@+,%/a0@+\n\t"
+ " dbra %2,1b\n\t"
+ " lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
+ " lsrl #2,%1\n\t"
+ " subql #1,%1\n\t"
+ "2: movesl %4@+,%2\n\t"
+ "3: movel %2,%/a0@+\n\t"
+ " dbra %1,2b\n\t"
+ " bral ret_from_signal\n"
+ "4:\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 2b,4b\n"
+ " .long 3b,4b\n"
+ ".previous"
+ : /* no outputs, it doesn't ever return */
+ : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
+ "n" (frame_offset), "a" (fp)
+ : "a0");
+#undef frame_offset
+ /*
+ * If we ever get here an exception occurred while
+ * building the above stack-frame.
+ */
+ goto badframe;
+ }
+
+ *pd0 = context.sc_d0;
+ return err;
+
+badframe:
+ return 1;
+}
+
+static inline int
+rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
+ struct ucontext *uc, int *pd0)
+{
+ int fsize, temp;
+ greg_t *gregs = uc->uc_mcontext.gregs;
+ unsigned long usp;
+ int err;
+
+ err = __get_user(temp, &uc->uc_mcontext.version);
+ if (temp != MCONTEXT_VERSION)
+ goto badframe;
+ /* restore passed registers */
+ err |= __get_user(regs->d0, &gregs[0]);
+ err |= __get_user(regs->d1, &gregs[1]);
+ err |= __get_user(regs->d2, &gregs[2]);
+ err |= __get_user(regs->d3, &gregs[3]);
+ err |= __get_user(regs->d4, &gregs[4]);
+ err |= __get_user(regs->d5, &gregs[5]);
+ err |= __get_user(sw->d6, &gregs[6]);
+ err |= __get_user(sw->d7, &gregs[7]);
+ err |= __get_user(regs->a0, &gregs[8]);
+ err |= __get_user(regs->a1, &gregs[9]);
+ err |= __get_user(regs->a2, &gregs[10]);
+ err |= __get_user(sw->a3, &gregs[11]);
+ err |= __get_user(sw->a4, &gregs[12]);
+ err |= __get_user(sw->a5, &gregs[13]);
+ err |= __get_user(sw->a6, &gregs[14]);
+ err |= __get_user(usp, &gregs[15]);
+ wrusp(usp);
+ err |= __get_user(regs->pc, &gregs[16]);
+ err |= __get_user(temp, &gregs[17]);
+ regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
+ regs->orig_d0 = -1; /* disable syscall checks */
+ err |= __get_user(temp, &uc->uc_formatvec);
+ regs->format = temp >> 12;
+ regs->vector = temp & 0xfff;
+
+ err |= rt_restore_fpu_state(uc);
+
+ if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
+ goto badframe;
+
+ fsize = frame_extra_sizes[regs->format];
+ if (fsize < 0) {
+ /*
+ * user process trying to return with weird frame format
+ */
+#ifdef DEBUG
+ printk("user process returning with weird frame format\n");
+#endif
+ goto badframe;
+ }
+
+ /* OK. Make room on the supervisor stack for the extra junk,
+ * if necessary.
+ */
+
+ if (fsize) {
+#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
+ __asm__ __volatile__
+ (" movel %0,%/a0\n\t"
+ " subl %1,%/a0\n\t" /* make room on stack */
+ " movel %/a0,%/sp\n\t" /* set stack pointer */
+ /* move switch_stack and pt_regs */
+ "1: movel %0@+,%/a0@+\n\t"
+ " dbra %2,1b\n\t"
+ " lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
+ " lsrl #2,%1\n\t"
+ " subql #1,%1\n\t"
+ "2: movesl %4@+,%2\n\t"
+ "3: movel %2,%/a0@+\n\t"
+ " dbra %1,2b\n\t"
+ " bral ret_from_signal\n"
+ "4:\n"
+ ".section __ex_table,\"a\"\n"
+ " .align 4\n"
+ " .long 2b,4b\n"
+ " .long 3b,4b\n"
+ ".previous"
+ : /* no outputs, it doesn't ever return */
+ : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
+ "n" (frame_offset), "a" (&uc->uc_extra)
+ : "a0");
+#undef frame_offset
+ /*
+ * If we ever get here an exception occurred while
+ * building the above stack-frame.
+ */
+ goto badframe;
+ }
+
+ *pd0 = regs->d0;
+ return err;
+
+badframe:
+ return 1;
+}
+
+asmlinkage int do_sigreturn(unsigned long __unused)
+{
+ struct switch_stack *sw = (struct switch_stack *) &__unused;
+ struct pt_regs *regs = (struct pt_regs *) (sw + 1);
+ unsigned long usp = rdusp();
+ struct sigframe *frame = (struct sigframe *)(usp - 4);
+ sigset_t set;
+ int d0;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
+ (_NSIG_WORDS > 1 &&
+ __copy_from_user(&set.sig[1], &frame->extramask,
+ sizeof(frame->extramask))))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ current->blocked = set;
+ recalc_sigpending();
+
+ if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
+ goto badframe;
+ return d0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+asmlinkage int do_rt_sigreturn(unsigned long __unused)
+{
+ struct switch_stack *sw = (struct switch_stack *) &__unused;
+ struct pt_regs *regs = (struct pt_regs *) (sw + 1);
+ unsigned long usp = rdusp();
+ struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4);
+ sigset_t set;
+ int d0;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ current->blocked = set;
+ recalc_sigpending();
+
+ if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
+ goto badframe;
+ return d0;
+
+badframe:
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
+{
+ if (FPU_IS_EMU) {
+ /* save registers */
+ memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
+ memcpy(sc->sc_fpregs, current->thread.fp, 24);
+ return;
+ }
+
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "fsave %0\n\t"
+ ".chip 68k"
+ : : "m" (*sc->sc_fpstate) : "memory");
+
+ if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+ fpu_version = sc->sc_fpstate[0];
+ if (CPU_IS_020_OR_030 &&
+ regs->vector >= (VEC_FPBRUC * 4) &&
+ regs->vector <= (VEC_FPNAN * 4)) {
+ /* Clear pending exception in 68882 idle frame */
+ if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
+ sc->sc_fpstate[0x38] |= 1 << 3;
+ }
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "fmovemx %%fp0-%%fp1,%0\n\t"
+ "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+ ".chip 68k"
+ : "=m" (*sc->sc_fpregs),
+ "=m" (*sc->sc_fpcntl)
+ : /* no inputs */
+ : "memory");
+ }
+}
+
+static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
+{
+ unsigned char fpstate[FPCONTEXT_SIZE];
+ int context_size = CPU_IS_060 ? 8 : 0;
+ int err = 0;
+
+ if (FPU_IS_EMU) {
+ /* save fpu control register */
+ err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
+ current->thread.fpcntl, 12);
+ /* save all other fpu register */
+ err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
+ current->thread.fp, 96);
+ return err;
+ }
+
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "fsave %0\n\t"
+ ".chip 68k"
+ : : "m" (*fpstate) : "memory");
+
+ err |= __put_user(*(long *)fpstate, (long *)&uc->uc_fpstate);
+ if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
+ fpregset_t fpregs;
+ if (!CPU_IS_060)
+ context_size = fpstate[1];
+ fpu_version = fpstate[0];
+ if (CPU_IS_020_OR_030 &&
+ regs->vector >= (VEC_FPBRUC * 4) &&
+ regs->vector <= (VEC_FPNAN * 4)) {
+ /* Clear pending exception in 68882 idle frame */
+ if (*(unsigned short *) fpstate == 0x1f38)
+ fpstate[0x38] |= 1 << 3;
+ }
+ __asm__ volatile (".chip 68k/68881\n\t"
+ "fmovemx %%fp0-%%fp7,%0\n\t"
+ "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+ ".chip 68k"
+ : "=m" (*fpregs.f_fpregs),
+ "=m" (*fpregs.f_fpcntl)
+ : /* no inputs */
+ : "memory");
+ err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
+ sizeof(fpregs));
+ }
+ if (context_size)
+ err |= copy_to_user((long *)&uc->uc_fpstate + 1, fpstate + 4,
+ context_size);
+ return err;
+}
+
+static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ sc->sc_mask = mask;
+ sc->sc_usp = rdusp();
+ sc->sc_d0 = regs->d0;
+ sc->sc_d1 = regs->d1;
+ sc->sc_a0 = regs->a0;
+ sc->sc_a1 = regs->a1;
+ sc->sc_sr = regs->sr;
+ sc->sc_pc = regs->pc;
+ sc->sc_formatvec = regs->format << 12 | regs->vector;
+ save_fpu_state(sc, regs);
+}
+
+static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
+{
+ struct switch_stack *sw = (struct switch_stack *)regs - 1;
+ greg_t *gregs = uc->uc_mcontext.gregs;
+ int err = 0;
+
+ err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
+ err |= __put_user(regs->d0, &gregs[0]);
+ err |= __put_user(regs->d1, &gregs[1]);
+ err |= __put_user(regs->d2, &gregs[2]);
+ err |= __put_user(regs->d3, &gregs[3]);
+ err |= __put_user(regs->d4, &gregs[4]);
+ err |= __put_user(regs->d5, &gregs[5]);
+ err |= __put_user(sw->d6, &gregs[6]);
+ err |= __put_user(sw->d7, &gregs[7]);
+ err |= __put_user(regs->a0, &gregs[8]);
+ err |= __put_user(regs->a1, &gregs[9]);
+ err |= __put_user(regs->a2, &gregs[10]);
+ err |= __put_user(sw->a3, &gregs[11]);
+ err |= __put_user(sw->a4, &gregs[12]);
+ err |= __put_user(sw->a5, &gregs[13]);
+ err |= __put_user(sw->a6, &gregs[14]);
+ err |= __put_user(rdusp(), &gregs[15]);
+ err |= __put_user(regs->pc, &gregs[16]);
+ err |= __put_user(regs->sr, &gregs[17]);
+ err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
+ err |= rt_save_fpu_state(uc, regs);
+ return err;
+}
+
+static inline void push_cache (unsigned long vaddr)
+{
+ /*
+ * Using the old cache_push_v() was really a big waste.
+ *
+ * What we are trying to do is flush 8 bytes to RAM.
+ * Flushing 2 cache lines of 16 bytes is much cheaper than
+ * flushing 1 or 2 pages, as previously done in
+ * cache_push_v().
+ * Jes
+ */
+ if (CPU_IS_040) {
+ unsigned long temp;
+
+ __asm__ __volatile__ (".chip 68040\n\t"
+ "nop\n\t"
+ "ptestr (%1)\n\t"
+ "movec %%mmusr,%0\n\t"
+ ".chip 68k"
+ : "=r" (temp)
+ : "a" (vaddr));
+
+ temp &= PAGE_MASK;
+ temp |= vaddr & ~PAGE_MASK;
+
+ __asm__ __volatile__ (".chip 68040\n\t"
+ "nop\n\t"
+ "cpushl %%bc,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (temp));
+ }
+ else if (CPU_IS_060) {
+ unsigned long temp;
+ __asm__ __volatile__ (".chip 68060\n\t"
+ "plpar (%0)\n\t"
+ ".chip 68k"
+ : "=a" (temp)
+ : "0" (vaddr));
+ __asm__ __volatile__ (".chip 68060\n\t"
+ "cpushl %%bc,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (temp));
+ }
+ else {
+ /*
+ * 68030/68020 have no writeback cache;
+ * still need to clear icache.
+ * Note that vaddr is guaranteed to be long word aligned.
+ */
+ unsigned long temp;
+ asm volatile ("movec %%cacr,%0" : "=r" (temp));
+ temp += 4;
+ asm volatile ("movec %0,%%caar\n\t"
+ "movec %1,%%cacr"
+ : : "r" (vaddr), "r" (temp));
+ asm volatile ("movec %0,%%caar\n\t"
+ "movec %1,%%cacr"
+ : : "r" (vaddr + 4), "r" (temp));
+ }
+}
+
+static inline void *
+get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
+{
+ unsigned long usp;
+
+ /* Default to using normal stack. */
+ usp = rdusp();
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ if (ka->sa.sa_flags & SA_ONSTACK) {
+ if (!on_sig_stack(usp))
+ usp = current->sas_ss_sp + current->sas_ss_size;
+ }
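+	/*
+	 * Rounding the frame address down to a multiple of 8 with & -8UL
+	 * keeps the frame, and hence the retcode stub inside it, quadword
+	 * aligned, which the sigreturn cache flush comment above relies on.
+	 */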
+ return (void *)((usp - frame_size) & -8UL);
+}
+
+static void setup_frame (int sig, struct k_sigaction *ka,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct sigframe *frame;
+ int fsize = frame_extra_sizes[regs->format];
+ struct sigcontext context;
+ int err = 0;
+
+ if (fsize < 0) {
+#ifdef DEBUG
+ printk ("setup_frame: Unknown frame format %#x\n",
+ regs->format);
+#endif
+ goto give_sigsegv;
+ }
+
+ frame = get_sigframe(ka, regs, sizeof(*frame) + fsize);
+
+ if (fsize) {
+ err |= copy_to_user (frame + 1, regs + 1, fsize);
+ regs->stkadj = fsize;
+ }
+
+ err |= __put_user((current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig),
+ &frame->sig);
+
+ err |= __put_user(regs->vector, &frame->code);
+ err |= __put_user(&frame->sc, &frame->psc);
+
+ if (_NSIG_WORDS > 1)
+ err |= copy_to_user(frame->extramask, &set->sig[1],
+ sizeof(frame->extramask));
+
+ setup_sigcontext(&context, regs, set->sig[0]);
+ err |= copy_to_user (&frame->sc, &context, sizeof(context));
+
+ /* Set up to return from userspace. */
+ err |= __put_user(frame->retcode, &frame->pretcode);
+ /* moveq #,d0; trap #0 */
+ err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
+ (long *)(frame->retcode));
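+	/*
+	 * A note on the constant above: 0x70xx encodes "moveq #xx,%d0" and
+	 * 0x4e40 encodes "trap #0", so adding (__NR_sigreturn << 16) places
+	 * the syscall number in the moveq immediate byte.
+	 */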
+
+ if (err)
+ goto give_sigsegv;
+
+ push_cache ((unsigned long) &frame->retcode);
+
+ /* Set up registers for signal handler */
+ wrusp ((unsigned long) frame);
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+
+adjust_stack:
+ /* Prepare to skip over the extra stuff in the exception frame. */
+ if (regs->stkadj) {
+ struct pt_regs *tregs =
+ (struct pt_regs *)((ulong)regs + regs->stkadj);
+#ifdef DEBUG
+ printk("Performing stackadjust=%04x\n", regs->stkadj);
+#endif
+ /* This must be copied with decreasing addresses to
+ handle overlaps. */
+ tregs->vector = 0;
+ tregs->format = 0;
+ tregs->pc = regs->pc;
+ tregs->sr = regs->sr;
+ }
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+ goto adjust_stack;
+}
+
+static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe *frame;
+ int fsize = frame_extra_sizes[regs->format];
+ int err = 0;
+
+ if (fsize < 0) {
+#ifdef DEBUG
+ printk ("setup_frame: Unknown frame format %#x\n",
+ regs->format);
+#endif
+ goto give_sigsegv;
+ }
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+
+ if (fsize) {
+ err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
+ regs->stkadj = fsize;
+ }
+
+ err |= __put_user((current_thread_info()->exec_domain
+ && current_thread_info()->exec_domain->signal_invmap
+ && sig < 32
+ ? current_thread_info()->exec_domain->signal_invmap[sig]
+ : sig),
+ &frame->sig);
+ err |= __put_user(&frame->info, &frame->pinfo);
+ err |= __put_user(&frame->uc, &frame->puc);
+ err |= copy_siginfo_to_user(&frame->info, info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user((void *)current->sas_ss_sp,
+ &frame->uc.uc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(rdusp()),
+ &frame->uc.uc_stack.ss_flags);
+ err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
+ err |= rt_setup_ucontext(&frame->uc, regs);
+ err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ /* Set up to return from userspace. */
+ err |= __put_user(frame->retcode, &frame->pretcode);
+ /* moveq #,d0; notb d0; trap #0 */
+ err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
+ (long *)(frame->retcode + 0));
+ err |= __put_user(0x4e40, (short *)(frame->retcode + 4));
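+	/*
+	 * A note on the two words above: moveq takes only a sign-extended
+	 * 8-bit immediate, presumably too small for __NR_rt_sigreturn, so
+	 * the stub loads the complemented value and 0x4600 ("not.b %d0")
+	 * flips it back before the trap.
+	 */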
+
+ if (err)
+ goto give_sigsegv;
+
+ push_cache ((unsigned long) &frame->retcode);
+
+ /* Set up registers for signal handler */
+ wrusp ((unsigned long) frame);
+ regs->pc = (unsigned long) ka->sa.sa_handler;
+
+adjust_stack:
+ /* Prepare to skip over the extra stuff in the exception frame. */
+ if (regs->stkadj) {
+ struct pt_regs *tregs =
+ (struct pt_regs *)((ulong)regs + regs->stkadj);
+#ifdef DEBUG
+ printk("Performing stackadjust=%04x\n", regs->stkadj);
+#endif
+ /* This must be copied with decreasing addresses to
+ handle overlaps. */
+ tregs->vector = 0;
+ tregs->format = 0;
+ tregs->pc = regs->pc;
+ tregs->sr = regs->sr;
+ }
+ return;
+
+give_sigsegv:
+ force_sigsegv(sig, current);
+ goto adjust_stack;
+}
+
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+ switch (regs->d0) {
+ case -ERESTARTNOHAND:
+ if (!has_handler)
+ goto do_restart;
+ regs->d0 = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+ regs->d0 = -EINTR;
+ break;
+ }
+ /* fallthrough */
+ case -ERESTARTNOINTR:
+ do_restart:
+ regs->d0 = regs->orig_d0;
+ regs->pc -= 2;
+ break;
+ }
+}
+
+void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
+{
+ if (regs->orig_d0 < 0)
+ return;
+ switch (regs->d0) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->d0 = regs->orig_d0;
+ regs->orig_d0 = -1;
+ regs->pc -= 2;
+ break;
+ }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *oldset, struct pt_regs *regs)
+{
+ /* are we from a system call? */
+ if (regs->orig_d0 >= 0)
+ /* If so, check system call restarting.. */
+ handle_restart(regs, ka, 1);
+
+ /* set up the stack frame */
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ setup_rt_frame(sig, ka, info, oldset, regs);
+ else
+ setup_frame(sig, ka, oldset, regs);
+
+ if (ka->sa.sa_flags & SA_ONESHOT)
+ ka->sa.sa_handler = SIG_DFL;
+
+ sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
+ if (!(ka->sa.sa_flags & SA_NODEFER))
+ sigaddset(&current->blocked,sig);
+ recalc_sigpending();
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs)
+{
+ siginfo_t info;
+ struct k_sigaction ka;
+ int signr;
+
+ current->thread.esp0 = (unsigned long) regs;
+
+ if (!oldset)
+ oldset = &current->blocked;
+
+ signr = get_signal_to_deliver(&info, &ka, regs, NULL);
+ if (signr > 0) {
+ /* Whee! Actually deliver the signal. */
+ handle_signal(signr, &ka, &info, oldset, regs);
+ return 1;
+ }
+
+ /* Did we come from a system call? */
+ if (regs->orig_d0 >= 0)
+ /* Restart the system call - no handlers present */
+ handle_restart(regs, NULL, 0);
+
+ return 0;
+}
diff --git a/arch/m68k/kernel/sun3-head.S b/arch/m68k/kernel/sun3-head.S
new file mode 100644
index 00000000000..bffd69a4a1a
--- /dev/null
+++ b/arch/m68k/kernel/sun3-head.S
@@ -0,0 +1,104 @@
+#include <linux/linkage.h>
+
+#include <asm/entry.h>
+#include <asm/page.h>
+#include <asm/contregs.h>
+#include <asm/sun3-head.h>
+
+PSL_HIGHIPL = 0x2700
+NBSG = 0x20000
+ICACHE_ONLY = 0x00000009
+CACHES_OFF = 0x00000008 | actually a clear and disable --m
+#define MAS_STACK INT_STACK
+ROOT_TABLE_SIZE = 128
+PAGESIZE = 8192
+SUN3_INVALID_PMEG = 255
+.globl bootup_user_stack
+.globl bootup_kernel_stack
+.globl pg0
+.globl swapper_pg_dir
+.globl kernel_pmd_table
+.globl availmem
+.global m68k_pgtable_cachemode
+.global kpt
+| todo: all these should be in bss!
+swapper_pg_dir: .skip 0x2000
+pg0: .skip 0x2000
+kernel_pmd_table: .skip 0x2000
+
+.globl kernel_pg_dir
+.equ kernel_pg_dir,kernel_pmd_table
+
+ .section .head
+ENTRY(_stext)
+ENTRY(_start)
+
+/* Firstly, disable interrupts and set up function codes. */
+ movew #PSL_HIGHIPL, %sr
+ moveq #FC_CONTROL, %d0
+ movec %d0, %sfc
+ movec %d0, %dfc
+
+/* Make sure we're in context zero. */
+ moveq #0, %d0
+ movsb %d0, AC_CONTEXT
+
+/* map everything the bootloader left us into high memory, clean up the
+ excess later */
+ lea (AC_SEGMAP+0),%a0
+ lea (AC_SEGMAP+KERNBASE),%a1
+1:
+ movsb %a0@, %d1
+ movsb %d1, %a1@
+ cmpib #SUN3_INVALID_PMEG, %d1
+ beq 2f
+ addl #NBSG,%a0
+ addl #NBSG,%a1
+ jmp 1b
+
+2:
+
+/* Disable caches and jump to high code. */
+ moveq #ICACHE_ONLY,%d0 | Cache disabled until we're ready to enable it
+ movc %d0, %cacr | is this the right value? (yes --m)
+ jmp 1f:l
+
+/* Following code executes at high addresses (0xE000xxx). */
+1: lea init_task,%curptr | get initial thread...
+ lea init_thread_union+THREAD_SIZE,%sp | ...and its stack.
+
+/* copy bootinfo records from the loader to _end */
+ lea _end, %a1
+ lea BI_START, %a0
+ /* number of longs to copy */
+ movel %a0@, %d0
+1: addl #4, %a0
+ movel %a0@, %a1@
+ addl #4, %a1
+ dbf %d0, 1b
+
+/* Point MSP at an invalid page to trap if it's used. --m */
+ movl #(PAGESIZE),%d0
+ movc %d0,%msp
+ moveq #-1,%d0
+ movsb %d0,(AC_SEGMAP+0x0)
+
+ jbsr sun3_init
+
+ jbsr base_trap_init
+
+ jbsr start_kernel
+ trap #15
+
+ .data
+ .even
+kpt:
+ .long 0
+availmem:
+ .long 0
+| todo: remove next two. --m
+is_medusa:
+ .long 0
+m68k_pgtable_cachemode:
+ .long 0
+
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
new file mode 100644
index 00000000000..2ed7b783f65
--- /dev/null
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -0,0 +1,671 @@
+/*
+ * linux/arch/m68k/kernel/sys_m68k.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/m68k
+ * platform.
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/cachectl.h>
+#include <asm/traps.h>
+#include <asm/ipc.h>
+#include <asm/page.h>
+
+/*
+ * sys_pipe() is the normal C calling standard for creating
+ * a pipe. It's not the way unix traditionally does this, though.
+ */
+asmlinkage int sys_pipe(unsigned long * fildes)
+{
+ int fd[2];
+ int error;
+
+ error = do_pipe(fd);
+ if (!error) {
+ if (copy_to_user(fildes, fd, 2*sizeof(int)))
+ error = -EFAULT;
+ }
+ return error;
+}
+
+/* common code for old and new mmaps */
+static inline long do_mmap2(
+ unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ int error = -EBADF;
+ struct file * file = NULL;
+
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff)
+{
+ return do_mmap2(addr, len, prot, flags, fd, pgoff);
+}
+
+/*
+ * Perform the select(nd, in, out, ex, tv) and mmap() system
+ * calls. Linux/m68k cloned Linux/i386, which historically could not
+ * handle more than 4 system call parameters, so these system calls
+ * used a memory block for parameter passing.
+ */
+
+struct mmap_arg_struct {
+ unsigned long addr;
+ unsigned long len;
+ unsigned long prot;
+ unsigned long flags;
+ unsigned long fd;
+ unsigned long offset;
+};
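+/*
+ * Illustrative call sequence (user-space side, sketched from the comment
+ * above rather than from any particular libc source): the mmap() wrapper
+ * fills a struct like the one above on its own stack and issues the mmap
+ * trap with the block's address as the single argument that old_mmap()
+ * receives.
+ */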
+
+asmlinkage int old_mmap(struct mmap_arg_struct *arg)
+{
+ struct mmap_arg_struct a;
+ int error = -EFAULT;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ goto out;
+
+ error = -EINVAL;
+ if (a.offset & ~PAGE_MASK)
+ goto out;
+
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
+out:
+ return error;
+}
+
+#if 0
+struct mmap_arg_struct64 {
+ __u32 addr;
+ __u32 len;
+ __u32 prot;
+ __u32 flags;
+ __u64 offset; /* 64 bits */
+ __u32 fd;
+};
+
+asmlinkage long sys_mmap64(struct mmap_arg_struct64 *arg)
+{
+ int error = -EFAULT;
+ struct file * file = NULL;
+ struct mmap_arg_struct64 a;
+ unsigned long pgoff;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+
+ if ((long)a.offset & ~PAGE_MASK)
+ return -EINVAL;
+
+ pgoff = a.offset >> PAGE_SHIFT;
+ if ((a.offset >> PAGE_SHIFT) != pgoff)
+ return -EINVAL;
+
+ if (!(a.flags & MAP_ANONYMOUS)) {
+ error = -EBADF;
+ file = fget(a.fd);
+ if (!file)
+ goto out;
+ }
+ a.flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, a.addr, a.len, a.prot, a.flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
+out:
+ return error;
+}
+#endif
+
+struct sel_arg_struct {
+ unsigned long n;
+ fd_set *inp, *outp, *exp;
+ struct timeval *tvp;
+};
+
+asmlinkage int old_select(struct sel_arg_struct *arg)
+{
+ struct sel_arg_struct a;
+
+ if (copy_from_user(&a, arg, sizeof(a)))
+ return -EFAULT;
+ /* sys_select() does the appropriate kernel locking */
+ return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);
+}
+
+/*
+ * sys_ipc() is the de-multiplexer for the SysV IPC calls..
+ *
+ * This is really horribly ugly.
+ */
+asmlinkage int sys_ipc (uint call, int first, int second,
+ int third, void *ptr, long fifth)
+{
+ int version, ret;
+
+ version = call >> 16; /* hack for backward compatibility */
+ call &= 0xffff;
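+	/*
+	 * One plausible example of the encoding: a library issuing the
+	 * "new" msgrcv form passes call = MSGRCV | (1 << 16), so version
+	 * becomes 1 and the default MSGRCV branch below takes msgtyp
+	 * straight from 'fifth'.
+	 */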
+
+ if (call <= SEMCTL)
+ switch (call) {
+ case SEMOP:
+ return sys_semop (first, (struct sembuf *)ptr, second);
+ case SEMGET:
+ return sys_semget (first, second, third);
+ case SEMCTL: {
+ union semun fourth;
+ if (!ptr)
+ return -EINVAL;
+ if (get_user(fourth.__pad, (void **) ptr))
+ return -EFAULT;
+ return sys_semctl (first, second, third, fourth);
+ }
+ default:
+ return -ENOSYS;
+ }
+ if (call <= MSGCTL)
+ switch (call) {
+ case MSGSND:
+ return sys_msgsnd (first, (struct msgbuf *) ptr,
+ second, third);
+ case MSGRCV:
+ switch (version) {
+ case 0: {
+ struct ipc_kludge tmp;
+ if (!ptr)
+ return -EINVAL;
+ if (copy_from_user (&tmp,
+ (struct ipc_kludge *)ptr,
+ sizeof (tmp)))
+ return -EFAULT;
+ return sys_msgrcv (first, tmp.msgp, second,
+ tmp.msgtyp, third);
+ }
+ default:
+ return sys_msgrcv (first,
+ (struct msgbuf *) ptr,
+ second, fifth, third);
+ }
+ case MSGGET:
+ return sys_msgget ((key_t) first, second);
+ case MSGCTL:
+ return sys_msgctl (first, second,
+ (struct msqid_ds *) ptr);
+ default:
+ return -ENOSYS;
+ }
+ if (call <= SHMCTL)
+ switch (call) {
+ case SHMAT:
+ switch (version) {
+ default: {
+ ulong raddr;
+ ret = do_shmat (first, (char *) ptr,
+ second, &raddr);
+ if (ret)
+ return ret;
+ return put_user (raddr, (ulong *) third);
+ }
+ }
+ case SHMDT:
+ return sys_shmdt ((char *)ptr);
+ case SHMGET:
+ return sys_shmget (first, second, third);
+ case SHMCTL:
+ return sys_shmctl (first, second,
+ (struct shmid_ds *) ptr);
+ default:
+ return -ENOSYS;
+ }
+
+ return -EINVAL;
+}
+
+/* Convert virtual (user) address VADDR to physical address PADDR */
+#define virt_to_phys_040(vaddr) \
+({ \
+ unsigned long _mmusr, _paddr; \
+ \
+ __asm__ __volatile__ (".chip 68040\n\t" \
+ "ptestr (%1)\n\t" \
+ "movec %%mmusr,%0\n\t" \
+ ".chip 68k" \
+ : "=r" (_mmusr) \
+ : "a" (vaddr)); \
+ _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
+ _paddr; \
+})
+
+static inline int
+cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
+{
+ unsigned long paddr, i;
+
+ switch (scope)
+ {
+ case FLUSH_SCOPE_ALL:
+ switch (cache)
+ {
+ case FLUSH_CACHE_DATA:
+ /* This nop is needed for some broken versions of the 68040. */
+ __asm__ __volatile__ ("nop\n\t"
+ ".chip 68040\n\t"
+ "cpusha %dc\n\t"
+ ".chip 68k");
+ break;
+ case FLUSH_CACHE_INSN:
+ __asm__ __volatile__ ("nop\n\t"
+ ".chip 68040\n\t"
+ "cpusha %ic\n\t"
+ ".chip 68k");
+ break;
+ default:
+ case FLUSH_CACHE_BOTH:
+ __asm__ __volatile__ ("nop\n\t"
+ ".chip 68040\n\t"
+ "cpusha %bc\n\t"
+ ".chip 68k");
+ break;
+ }
+ break;
+
+ case FLUSH_SCOPE_LINE:
+ /* Find the physical address of the first mapped page in the
+ address range. */
+ if ((paddr = virt_to_phys_040(addr))) {
+ paddr += addr & ~(PAGE_MASK | 15);
+ len = (len + (addr & 15) + 15) >> 4;
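+		/*
+		 * Worked example: with the address 4 bytes into a cache line
+		 * and len = 40 bytes, (4 + 40 + 15) >> 4 = 3 lines, which is
+		 * exactly what bytes 4..43 of the region span.
+		 */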
+ } else {
+ unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
+
+ if (len <= tmp)
+ return 0;
+ addr += tmp;
+ len -= tmp;
+ tmp = PAGE_SIZE;
+ for (;;)
+ {
+ if ((paddr = virt_to_phys_040(addr)))
+ break;
+ if (len <= tmp)
+ return 0;
+ addr += tmp;
+ len -= tmp;
+ }
+ len = (len + 15) >> 4;
+ }
+ i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
+ while (len--)
+ {
+ switch (cache)
+ {
+ case FLUSH_CACHE_DATA:
+ __asm__ __volatile__ ("nop\n\t"
+ ".chip 68040\n\t"
+ "cpushl %%dc,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (paddr));
+ break;
+ case FLUSH_CACHE_INSN:
+ __asm__ __volatile__ ("nop\n\t"
+ ".chip 68040\n\t"
+ "cpushl %%ic,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (paddr));
+ break;
+ default:
+ case FLUSH_CACHE_BOTH:
+ __asm__ __volatile__ ("nop\n\t"
+ ".chip 68040\n\t"
+ "cpushl %%bc,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (paddr));
+ break;
+ }
+ if (!--i && len)
+ {
+ /*
+ * No need to page align here since it is done by
+ * virt_to_phys_040().
+ */
+ addr += PAGE_SIZE;
+ i = PAGE_SIZE / 16;
+ /* Recompute physical address when crossing a page
+ boundary. */
+ for (;;)
+ {
+ if ((paddr = virt_to_phys_040(addr)))
+ break;
+ if (len <= i)
+ return 0;
+ len -= i;
+ addr += PAGE_SIZE;
+ }
+ }
+ else
+ paddr += 16;
+ }
+ break;
+
+ default:
+ case FLUSH_SCOPE_PAGE:
+ len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
+ for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
+ {
+ if (!(paddr = virt_to_phys_040(addr)))
+ continue;
+ switch (cache)
+ {
+ case FLUSH_CACHE_DATA:
+ __asm__ __volatile__ ("nop\n\t"
+ ".chip 68040\n\t"
+ "cpushp %%dc,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (paddr));
+ break;
+ case FLUSH_CACHE_INSN:
+ __asm__ __volatile__ ("nop\n\t"
+ ".chip 68040\n\t"
+ "cpushp %%ic,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (paddr));
+ break;
+ default:
+ case FLUSH_CACHE_BOTH:
+ __asm__ __volatile__ ("nop\n\t"
+ ".chip 68040\n\t"
+ "cpushp %%bc,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (paddr));
+ break;
+ }
+ }
+ break;
+ }
+ return 0;
+}
+
+#define virt_to_phys_060(vaddr) \
+({ \
+ unsigned long paddr; \
+ __asm__ __volatile__ (".chip 68060\n\t" \
+ "plpar (%0)\n\t" \
+ ".chip 68k" \
+ : "=a" (paddr) \
+ : "0" (vaddr)); \
+ (paddr); /* XXX */ \
+})
+
+static inline int
+cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
+{
+ unsigned long paddr, i;
+
+ /*
+ * 68060 manual says:
+ * cpush %dc : flush DC, remains valid (with our %cacr setup)
+ * cpush %ic : invalidate IC
+ * cpush %bc : flush DC + invalidate IC
+ */
+ switch (scope)
+ {
+ case FLUSH_SCOPE_ALL:
+ switch (cache)
+ {
+ case FLUSH_CACHE_DATA:
+ __asm__ __volatile__ (".chip 68060\n\t"
+ "cpusha %dc\n\t"
+ ".chip 68k");
+ break;
+ case FLUSH_CACHE_INSN:
+ __asm__ __volatile__ (".chip 68060\n\t"
+ "cpusha %ic\n\t"
+ ".chip 68k");
+ break;
+ default:
+ case FLUSH_CACHE_BOTH:
+ __asm__ __volatile__ (".chip 68060\n\t"
+ "cpusha %bc\n\t"
+ ".chip 68k");
+ break;
+ }
+ break;
+
+ case FLUSH_SCOPE_LINE:
+ /* Find the physical address of the first mapped page in the
+ address range. */
+ len += addr & 15;
+ addr &= -16;
+ if (!(paddr = virt_to_phys_060(addr))) {
+ unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
+
+ if (len <= tmp)
+ return 0;
+ addr += tmp;
+ len -= tmp;
+ tmp = PAGE_SIZE;
+ for (;;)
+ {
+ if ((paddr = virt_to_phys_060(addr)))
+ break;
+ if (len <= tmp)
+ return 0;
+ addr += tmp;
+ len -= tmp;
+ }
+ }
+ len = (len + 15) >> 4;
+ i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
+ while (len--)
+ {
+ switch (cache)
+ {
+ case FLUSH_CACHE_DATA:
+ __asm__ __volatile__ (".chip 68060\n\t"
+ "cpushl %%dc,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (paddr));
+ break;
+ case FLUSH_CACHE_INSN:
+ __asm__ __volatile__ (".chip 68060\n\t"
+ "cpushl %%ic,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (paddr));
+ break;
+ default:
+ case FLUSH_CACHE_BOTH:
+ __asm__ __volatile__ (".chip 68060\n\t"
+ "cpushl %%bc,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (paddr));
+ break;
+ }
+ if (!--i && len)
+ {
+
+ /*
+ * We just want to jump to the first cache line
+ * in the next page.
+ */
+ addr += PAGE_SIZE;
+ addr &= PAGE_MASK;
+
+ i = PAGE_SIZE / 16;
+ /* Recompute physical address when crossing a page
+ boundary. */
+ for (;;)
+ {
+ if ((paddr = virt_to_phys_060(addr)))
+ break;
+ if (len <= i)
+ return 0;
+ len -= i;
+ addr += PAGE_SIZE;
+ }
+ }
+ else
+ paddr += 16;
+ }
+ break;
+
+ default:
+ case FLUSH_SCOPE_PAGE:
+ len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
+ addr &= PAGE_MASK; /* Workaround for bug in some
+ revisions of the 68060 */
+ for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
+ {
+ if (!(paddr = virt_to_phys_060(addr)))
+ continue;
+ switch (cache)
+ {
+ case FLUSH_CACHE_DATA:
+ __asm__ __volatile__ (".chip 68060\n\t"
+ "cpushp %%dc,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (paddr));
+ break;
+ case FLUSH_CACHE_INSN:
+ __asm__ __volatile__ (".chip 68060\n\t"
+ "cpushp %%ic,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (paddr));
+ break;
+ default:
+ case FLUSH_CACHE_BOTH:
+ __asm__ __volatile__ (".chip 68060\n\t"
+ "cpushp %%bc,(%0)\n\t"
+ ".chip 68k"
+ : : "a" (paddr));
+ break;
+ }
+ }
+ break;
+ }
+ return 0;
+}
+
+/* sys_cacheflush -- flush (part of) the processor cache. */
+asmlinkage int
+sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
+{
+ struct vm_area_struct *vma;
+ int ret = -EINVAL;
+
+ lock_kernel();
+ if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
+ cache & ~FLUSH_CACHE_BOTH)
+ goto out;
+
+ if (scope == FLUSH_SCOPE_ALL) {
+ /* Only the superuser may explicitly flush the whole cache. */
+ ret = -EPERM;
+ if (!capable(CAP_SYS_ADMIN))
+ goto out;
+ } else {
+ /*
+ * Verify that the specified address region actually belongs
+ * to this process.
+ */
+ vma = find_vma (current->mm, addr);
+ ret = -EINVAL;
+ /* Check for overflow. */
+ if (addr + len < addr)
+ goto out;
+ if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
+ goto out;
+ }
+
+ if (CPU_IS_020_OR_030) {
+ if (scope == FLUSH_SCOPE_LINE && len < 256) {
+ unsigned long cacr;
+ __asm__ ("movec %%cacr, %0" : "=r" (cacr));
+ if (cache & FLUSH_CACHE_INSN)
+ cacr |= 4;
+ if (cache & FLUSH_CACHE_DATA)
+ cacr |= 0x400;
+ len >>= 2;
+ while (len--) {
+ __asm__ __volatile__ ("movec %1, %%caar\n\t"
+ "movec %0, %%cacr"
+ : /* no outputs */
+ : "r" (cacr), "r" (addr));
+ addr += 4;
+ }
+ } else {
+ /* Flush the whole cache, even if page granularity requested. */
+ unsigned long cacr;
+ __asm__ ("movec %%cacr, %0" : "=r" (cacr));
+ if (cache & FLUSH_CACHE_INSN)
+ cacr |= 8;
+ if (cache & FLUSH_CACHE_DATA)
+ cacr |= 0x800;
+ __asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
+ }
+ ret = 0;
+ goto out;
+ } else {
+ /*
+ * 040 or 060: don't blindly trust 'scope', someone could
+ * try to flush a few megs of memory.
+ */
+
+		if (len >= 3*PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
+			scope = FLUSH_SCOPE_PAGE;
+		if (len >= 10*PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
+			scope = FLUSH_SCOPE_ALL;
+ if (CPU_IS_040) {
+ ret = cache_flush_040 (addr, scope, cache, len);
+ } else if (CPU_IS_060) {
+ ret = cache_flush_060 (addr, scope, cache, len);
+ }
+ }
+out:
+ unlock_kernel();
+ return ret;
+}
+
+asmlinkage int sys_getpagesize(void)
+{
+ return PAGE_SIZE;
+}
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
new file mode 100644
index 00000000000..e47e1958852
--- /dev/null
+++ b/arch/m68k/kernel/time.c
@@ -0,0 +1,187 @@
+/*
+ * linux/arch/m68k/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
+ *
+ * This file contains the m68k-specific time handling details.
+ * Most of the stuff is located in the machine specific files.
+ *
+ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
+ * "A Kernel Model for Precision Timekeeping" by Dave Mills
+ */
+
+#include <linux/config.h> /* CONFIG_HEARTBEAT */
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <asm/io.h>
+
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/profile.h>
+
+u64 jiffies_64 = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
+
+static inline int set_rtc_mmss(unsigned long nowtime)
+{
+ if (mach_set_clock_mmss)
+ return mach_set_clock_mmss (nowtime);
+ return -1;
+}
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "do_timer()" routine every clocktick
+ */
+static irqreturn_t timer_interrupt(int irq, void *dummy, struct pt_regs * regs)
+{
+ do_timer(regs);
+#ifndef CONFIG_SMP
+ update_process_times(user_mode(regs));
+#endif
+ profile_tick(CPU_PROFILING, regs);
+
+#ifdef CONFIG_HEARTBEAT
+ /* use power LED as a heartbeat instead -- much more useful
+ for debugging -- based on the version for PReP by Cort */
+	/* acts like an actual heartbeat -- i.e. thump-thump-pause... */
+ if (mach_heartbeat) {
+ static unsigned cnt = 0, period = 0, dist = 0;
+
+ if (cnt == 0 || cnt == dist)
+ mach_heartbeat( 1 );
+ else if (cnt == 7 || cnt == dist+7)
+ mach_heartbeat( 0 );
+
+ if (++cnt > period) {
+ cnt = 0;
+ /* The hyperbolic function below modifies the heartbeat period
+ * length in dependency of the current (5min) load. It goes
+ * through the points f(0)=126, f(1)=86, f(5)=51,
+ * f(inf)->30. */
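+			/*
+			 * Quick check of those points: with the load kept in
+			 * FSHIFT fixed point, the line below works out to
+			 * period = 672/(5*load+7) + 30, so load 0 -> 126,
+			 * load 1 -> 86, load 5 -> 51, and the +30 is the
+			 * limit for very large loads.
+			 */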
+ period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30;
+ dist = period / 4;
+ }
+ }
+#endif /* CONFIG_HEARTBEAT */
+ return IRQ_HANDLED;
+}
+
+void time_init(void)
+{
+ struct rtc_time time;
+
+ if (mach_hwclk) {
+ mach_hwclk(0, &time);
+
+ if ((time.tm_year += 1900) < 1970)
+ time.tm_year += 100;
+ xtime.tv_sec = mktime(time.tm_year, time.tm_mon, time.tm_mday,
+ time.tm_hour, time.tm_min, time.tm_sec);
+ xtime.tv_nsec = 0;
+ }
+ wall_to_monotonic.tv_sec = -xtime.tv_sec;
+
+ mach_sched_init(timer_interrupt);
+}
+
+/*
+ * This version of gettimeofday has near microsecond resolution.
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+ unsigned long flags;
+ extern unsigned long wall_jiffies;
+ unsigned long seq;
+ unsigned long usec, sec, lost;
+ unsigned long max_ntp_tick = tick_usec - tickadj;
+
+ do {
+ seq = read_seqbegin_irqsave(&xtime_lock, flags);
+
+ usec = mach_gettimeoffset();
+ lost = jiffies - wall_jiffies;
+
+ /*
+ * If time_adjust is negative then NTP is slowing the clock
+		 * so make sure not to go into the next possible interval.
+		 * Better to lose some accuracy than have time go backwards.
+ */
+ if (unlikely(time_adjust < 0)) {
+ usec = min(usec, max_ntp_tick);
+
+ if (lost)
+ usec += lost * max_ntp_tick;
+ }
+ else if (unlikely(lost))
+ usec += lost * tick_usec;
+
+ sec = xtime.tv_sec;
+ usec += xtime.tv_nsec/1000;
+ } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
+
+
+ while (usec >= 1000000) {
+ usec -= 1000000;
+ sec++;
+ }
+
+ tv->tv_sec = sec;
+ tv->tv_usec = usec;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
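do_gettimeofday() builds the sub-second part from the offset into the current tick plus any ticks that have elapsed but not yet been folded into xtime (jiffies - wall_jiffies); when NTP is slewing the clock backwards, both contributions are clamped to tick_usec - tickadj so the returned time can never run ahead of what the next tick will publish. A standalone sketch with invented numbers (HZ=100, and a tickadj of 500 us chosen purely for illustration):

    #include <stdio.h>

    #define TICK_USEC 10000UL   /* one tick at HZ=100 */
    #define TICKADJ     500UL   /* hypothetical slew amount */

    static unsigned long usec_since_xtime(unsigned long offset_usec,
                                          unsigned long lost_ticks,
                                          int ntp_slowing)
    {
            unsigned long max_ntp_tick = TICK_USEC - TICKADJ;
            unsigned long usec = offset_usec;

            if (ntp_slowing) {
                    if (usec > max_ntp_tick)        /* clamp the partial tick */
                            usec = max_ntp_tick;
                    usec += lost_ticks * max_ntp_tick;
            } else {
                    usec += lost_ticks * TICK_USEC;
            }
            return usec;
    }

    int main(void)
    {
            printf("%lu\n", usec_since_xtime(7300, 0, 0));  /* 7300 */
            printf("%lu\n", usec_since_xtime(7300, 2, 0));  /* 27300 */
            printf("%lu\n", usec_since_xtime(9800, 2, 1));  /* 9500 + 19000 = 28500 */
            return 0;
    }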
+
+int do_settimeofday(struct timespec *tv)
+{
+ time_t wtm_sec, sec = tv->tv_sec;
+ long wtm_nsec, nsec = tv->tv_nsec;
+ extern unsigned long wall_jiffies;
+
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ write_seqlock_irq(&xtime_lock);
+ /* This is revolting. We need to set the xtime.tv_nsec
+	 * correctly. However, the value in this location is
+	 * the value at the last tick.
+ * Discover what correction gettimeofday
+ * would have done, and then undo it!
+ */
+ nsec -= 1000 * (mach_gettimeoffset() +
+ (jiffies - wall_jiffies) * (1000000 / HZ));
+
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ time_adjust = 0; /* stop active adjtime() */
+ time_status |= STA_UNSYNC;
+ time_maxerror = NTP_PHASE_LIMIT;
+ time_esterror = NTP_PHASE_LIMIT;
+ write_sequnlock_irq(&xtime_lock);
+ clock_was_set();
+ return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
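The correction subtracted above exists because xtime holds the time as of the last tick while do_gettimeofday() adds the current intra-tick offset on every read; removing that offset first means a read immediately after do_settimeofday() returns the value the caller asked for. A toy illustration of the cancellation (numbers invented):

    #include <stdio.h>

    int main(void)
    {
            long requested_ns = 500000000L;     /* caller wants .5 s into the second */
            long offset_ns    = 3000 * 1000L;   /* what a read would add right now */

            long stored_ns    = requested_ns - offset_ns;  /* value put in xtime */
            long read_back_ns = stored_ns + offset_ns;     /* what a read returns */

            printf("stored %ld ns, read back %ld ns\n", stored_ns, read_back_ns);
            return 0;
    }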
+
+/*
+ * Scheduler clock - returns current time in ns units.
+ */
+unsigned long long sched_clock(void)
+{
+ return (unsigned long long)jiffies*(1000000000/HZ);
+}
+
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
new file mode 100644
index 00000000000..deb36e8b04a
--- /dev/null
+++ b/arch/m68k/kernel/traps.c
@@ -0,0 +1,1227 @@
+/*
+ * linux/arch/m68k/kernel/traps.c
+ *
+ * Copyright (C) 1993, 1994 by Hamish Macdonald
+ *
+ * 68040 fixes by Michael Rausch
+ * 68040 fixes by Martin Apel
+ * 68040 fixes and writeback by Richard Zidlicky
+ * 68060 fixes by Roman Hodek
+ * 68060 fixes by Jesper Skov
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Sets up all exception vectors
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/a.out.h>
+#include <linux/user.h>
+#include <linux/string.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/kallsyms.h>
+
+#include <asm/setup.h>
+#include <asm/fpu.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/traps.h>
+#include <asm/pgalloc.h>
+#include <asm/machdep.h>
+#include <asm/siginfo.h>
+
+/* assembler routines */
+asmlinkage void system_call(void);
+asmlinkage void buserr(void);
+asmlinkage void trap(void);
+asmlinkage void inthandler(void);
+asmlinkage void nmihandler(void);
+#ifdef CONFIG_M68KFPU_EMU
+asmlinkage void fpu_emu(void);
+#endif
+
+e_vector vectors[256] = {
+ [VEC_BUSERR] = buserr,
+ [VEC_ADDRERR] = trap,
+ [VEC_ILLEGAL] = trap,
+ [VEC_ZERODIV] = trap,
+ [VEC_CHK] = trap,
+ [VEC_TRAP] = trap,
+ [VEC_PRIV] = trap,
+ [VEC_TRACE] = trap,
+ [VEC_LINE10] = trap,
+ [VEC_LINE11] = trap,
+ [VEC_RESV12] = trap,
+ [VEC_COPROC] = trap,
+ [VEC_FORMAT] = trap,
+ [VEC_UNINT] = trap,
+ [VEC_RESV16] = trap,
+ [VEC_RESV17] = trap,
+ [VEC_RESV18] = trap,
+ [VEC_RESV19] = trap,
+ [VEC_RESV20] = trap,
+ [VEC_RESV21] = trap,
+ [VEC_RESV22] = trap,
+ [VEC_RESV23] = trap,
+ [VEC_SPUR] = inthandler,
+ [VEC_INT1] = inthandler,
+ [VEC_INT2] = inthandler,
+ [VEC_INT3] = inthandler,
+ [VEC_INT4] = inthandler,
+ [VEC_INT5] = inthandler,
+ [VEC_INT6] = inthandler,
+ [VEC_INT7] = inthandler,
+ [VEC_SYS] = system_call,
+ [VEC_TRAP1] = trap,
+ [VEC_TRAP2] = trap,
+ [VEC_TRAP3] = trap,
+ [VEC_TRAP4] = trap,
+ [VEC_TRAP5] = trap,
+ [VEC_TRAP6] = trap,
+ [VEC_TRAP7] = trap,
+ [VEC_TRAP8] = trap,
+ [VEC_TRAP9] = trap,
+ [VEC_TRAP10] = trap,
+ [VEC_TRAP11] = trap,
+ [VEC_TRAP12] = trap,
+ [VEC_TRAP13] = trap,
+ [VEC_TRAP14] = trap,
+ [VEC_TRAP15] = trap,
+};
+
+/* nmi handler for the Amiga */
+asm(".text\n"
+ __ALIGN_STR "\n"
+ "nmihandler: rte");
+
+/*
+ * this must be called very early as the kernel might
+ * use some instructions that are emulated on the 060
+ */
+void __init base_trap_init(void)
+{
+ if(MACH_IS_SUN3X) {
+ extern e_vector *sun3x_prom_vbr;
+
+ __asm__ volatile ("movec %%vbr, %0" : "=r" ((void*)sun3x_prom_vbr));
+ }
+
+ /* setup the exception vector table */
+ __asm__ volatile ("movec %0,%%vbr" : : "r" ((void*)vectors));
+
+ if (CPU_IS_060) {
+ /* set up ISP entry points */
+ asmlinkage void unimp_vec(void) asm ("_060_isp_unimp");
+
+ vectors[VEC_UNIMPII] = unimp_vec;
+ }
+}
+
+void __init trap_init (void)
+{
+ int i;
+
+ for (i = 48; i < 64; i++)
+ if (!vectors[i])
+ vectors[i] = trap;
+
+ for (i = 64; i < 256; i++)
+ vectors[i] = inthandler;
+
+#ifdef CONFIG_M68KFPU_EMU
+ if (FPU_IS_EMU)
+ vectors[VEC_LINE11] = fpu_emu;
+#endif
+
+ if (CPU_IS_040 && !FPU_IS_EMU) {
+ /* set up FPSP entry points */
+ asmlinkage void dz_vec(void) asm ("dz");
+ asmlinkage void inex_vec(void) asm ("inex");
+ asmlinkage void ovfl_vec(void) asm ("ovfl");
+ asmlinkage void unfl_vec(void) asm ("unfl");
+ asmlinkage void snan_vec(void) asm ("snan");
+ asmlinkage void operr_vec(void) asm ("operr");
+ asmlinkage void bsun_vec(void) asm ("bsun");
+ asmlinkage void fline_vec(void) asm ("fline");
+ asmlinkage void unsupp_vec(void) asm ("unsupp");
+
+ vectors[VEC_FPDIVZ] = dz_vec;
+ vectors[VEC_FPIR] = inex_vec;
+ vectors[VEC_FPOVER] = ovfl_vec;
+ vectors[VEC_FPUNDER] = unfl_vec;
+ vectors[VEC_FPNAN] = snan_vec;
+ vectors[VEC_FPOE] = operr_vec;
+ vectors[VEC_FPBRUC] = bsun_vec;
+ vectors[VEC_LINE11] = fline_vec;
+ vectors[VEC_FPUNSUP] = unsupp_vec;
+ }
+
+ if (CPU_IS_060 && !FPU_IS_EMU) {
+ /* set up IFPSP entry points */
+ asmlinkage void snan_vec(void) asm ("_060_fpsp_snan");
+ asmlinkage void operr_vec(void) asm ("_060_fpsp_operr");
+ asmlinkage void ovfl_vec(void) asm ("_060_fpsp_ovfl");
+ asmlinkage void unfl_vec(void) asm ("_060_fpsp_unfl");
+ asmlinkage void dz_vec(void) asm ("_060_fpsp_dz");
+ asmlinkage void inex_vec(void) asm ("_060_fpsp_inex");
+ asmlinkage void fline_vec(void) asm ("_060_fpsp_fline");
+ asmlinkage void unsupp_vec(void) asm ("_060_fpsp_unsupp");
+ asmlinkage void effadd_vec(void) asm ("_060_fpsp_effadd");
+
+ vectors[VEC_FPNAN] = snan_vec;
+ vectors[VEC_FPOE] = operr_vec;
+ vectors[VEC_FPOVER] = ovfl_vec;
+ vectors[VEC_FPUNDER] = unfl_vec;
+ vectors[VEC_FPDIVZ] = dz_vec;
+ vectors[VEC_FPIR] = inex_vec;
+ vectors[VEC_LINE11] = fline_vec;
+ vectors[VEC_FPUNSUP] = unsupp_vec;
+ vectors[VEC_UNIMPEA] = effadd_vec;
+ }
+
+ /* if running on an amiga, make the NMI interrupt do nothing */
+ if (MACH_IS_AMIGA) {
+ vectors[VEC_INT7] = nmihandler;
+ }
+}
+
+
+static const char *vec_names[] = {
+ [VEC_RESETSP] = "RESET SP",
+ [VEC_RESETPC] = "RESET PC",
+ [VEC_BUSERR] = "BUS ERROR",
+ [VEC_ADDRERR] = "ADDRESS ERROR",
+ [VEC_ILLEGAL] = "ILLEGAL INSTRUCTION",
+ [VEC_ZERODIV] = "ZERO DIVIDE",
+ [VEC_CHK] = "CHK",
+ [VEC_TRAP] = "TRAPcc",
+ [VEC_PRIV] = "PRIVILEGE VIOLATION",
+ [VEC_TRACE] = "TRACE",
+ [VEC_LINE10] = "LINE 1010",
+ [VEC_LINE11] = "LINE 1111",
+ [VEC_RESV12] = "UNASSIGNED RESERVED 12",
+ [VEC_COPROC] = "COPROCESSOR PROTOCOL VIOLATION",
+ [VEC_FORMAT] = "FORMAT ERROR",
+ [VEC_UNINT] = "UNINITIALIZED INTERRUPT",
+ [VEC_RESV16] = "UNASSIGNED RESERVED 16",
+ [VEC_RESV17] = "UNASSIGNED RESERVED 17",
+ [VEC_RESV18] = "UNASSIGNED RESERVED 18",
+ [VEC_RESV19] = "UNASSIGNED RESERVED 19",
+ [VEC_RESV20] = "UNASSIGNED RESERVED 20",
+ [VEC_RESV21] = "UNASSIGNED RESERVED 21",
+ [VEC_RESV22] = "UNASSIGNED RESERVED 22",
+ [VEC_RESV23] = "UNASSIGNED RESERVED 23",
+ [VEC_SPUR] = "SPURIOUS INTERRUPT",
+ [VEC_INT1] = "LEVEL 1 INT",
+ [VEC_INT2] = "LEVEL 2 INT",
+ [VEC_INT3] = "LEVEL 3 INT",
+ [VEC_INT4] = "LEVEL 4 INT",
+ [VEC_INT5] = "LEVEL 5 INT",
+ [VEC_INT6] = "LEVEL 6 INT",
+ [VEC_INT7] = "LEVEL 7 INT",
+ [VEC_SYS] = "SYSCALL",
+ [VEC_TRAP1] = "TRAP #1",
+ [VEC_TRAP2] = "TRAP #2",
+ [VEC_TRAP3] = "TRAP #3",
+ [VEC_TRAP4] = "TRAP #4",
+ [VEC_TRAP5] = "TRAP #5",
+ [VEC_TRAP6] = "TRAP #6",
+ [VEC_TRAP7] = "TRAP #7",
+ [VEC_TRAP8] = "TRAP #8",
+ [VEC_TRAP9] = "TRAP #9",
+ [VEC_TRAP10] = "TRAP #10",
+ [VEC_TRAP11] = "TRAP #11",
+ [VEC_TRAP12] = "TRAP #12",
+ [VEC_TRAP13] = "TRAP #13",
+ [VEC_TRAP14] = "TRAP #14",
+ [VEC_TRAP15] = "TRAP #15",
+ [VEC_FPBRUC] = "FPCP BSUN",
+ [VEC_FPIR] = "FPCP INEXACT",
+ [VEC_FPDIVZ] = "FPCP DIV BY 0",
+ [VEC_FPUNDER] = "FPCP UNDERFLOW",
+ [VEC_FPOE] = "FPCP OPERAND ERROR",
+ [VEC_FPOVER] = "FPCP OVERFLOW",
+ [VEC_FPNAN] = "FPCP SNAN",
+ [VEC_FPUNSUP] = "FPCP UNSUPPORTED OPERATION",
+ [VEC_MMUCFG] = "MMU CONFIGURATION ERROR",
+ [VEC_MMUILL] = "MMU ILLEGAL OPERATION ERROR",
+ [VEC_MMUACC] = "MMU ACCESS LEVEL VIOLATION ERROR",
+ [VEC_RESV59] = "UNASSIGNED RESERVED 59",
+ [VEC_UNIMPEA] = "UNASSIGNED RESERVED 60",
+ [VEC_UNIMPII] = "UNASSIGNED RESERVED 61",
+ [VEC_RESV62] = "UNASSIGNED RESERVED 62",
+ [VEC_RESV63] = "UNASSIGNED RESERVED 63",
+};
+
+static const char *space_names[] = {
+ [0] = "Space 0",
+ [USER_DATA] = "User Data",
+ [USER_PROGRAM] = "User Program",
+#ifndef CONFIG_SUN3
+ [3] = "Space 3",
+#else
+ [FC_CONTROL] = "Control",
+#endif
+ [4] = "Space 4",
+ [SUPER_DATA] = "Super Data",
+ [SUPER_PROGRAM] = "Super Program",
+ [CPU_SPACE] = "CPU"
+};
+
+void die_if_kernel(char *,struct pt_regs *,int);
+asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
+ unsigned long error_code);
+int send_fault_sig(struct pt_regs *regs);
+
+asmlinkage void trap_c(struct frame *fp);
+
+#if defined (CONFIG_M68060)
+static inline void access_error060 (struct frame *fp)
+{
+ unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */
+
+#ifdef DEBUG
+ printk("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);
+#endif
+
+ if (fslw & MMU060_BPE) {
+ /* branch prediction error -> clear branch cache */
+ __asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
+ "orl #0x00400000,%/d0\n\t"
+ "movec %/d0,%/cacr"
+ : : : "d0" );
+ /* return if there's no other error */
+ if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE))
+ return;
+ }
+
+ if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) {
+ unsigned long errorcode;
+ unsigned long addr = fp->un.fmt4.effaddr;
+
+ if (fslw & MMU060_MA)
+ addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;
+
+ errorcode = 1;
+ if (fslw & MMU060_DESC_ERR) {
+ __flush_tlb040_one(addr);
+ errorcode = 0;
+ }
+ if (fslw & MMU060_W)
+ errorcode |= 2;
+#ifdef DEBUG
+ printk("errorcode = %d\n", errorcode );
+#endif
+ do_page_fault(&fp->ptregs, addr, errorcode);
+ } else if (fslw & (MMU060_SEE)){
+ /* Software Emulation Error.
+ * fault during mem_read/mem_write in ifpsp060/os.S
+ */
+ send_fault_sig(&fp->ptregs);
+ } else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
+ send_fault_sig(&fp->ptregs) > 0) {
+ printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr);
+ printk( "68060 access error, fslw=%lx\n", fslw );
+ trap_c( fp );
+ }
+}
+#endif /* CONFIG_M68060 */
+
+#if defined (CONFIG_M68040)
+static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
+{
+ unsigned long mmusr;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(MAKE_MM_SEG(wbs));
+
+ if (iswrite)
+ asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
+ else
+ asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr));
+
+ asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
+
+ set_fs(old_fs);
+
+ return mmusr;
+}
+
+static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
+ unsigned long wbd)
+{
+ int res = 0;
+ mm_segment_t old_fs = get_fs();
+
+ /* set_fs can not be moved, otherwise put_user() may oops */
+ set_fs(MAKE_MM_SEG(wbs));
+
+ switch (wbs & WBSIZ_040) {
+ case BA_SIZE_BYTE:
+ res = put_user(wbd & 0xff, (char *)wba);
+ break;
+ case BA_SIZE_WORD:
+ res = put_user(wbd & 0xffff, (short *)wba);
+ break;
+ case BA_SIZE_LONG:
+ res = put_user(wbd, (int *)wba);
+ break;
+ }
+
+ /* set_fs can not be moved, otherwise put_user() may oops */
+ set_fs(old_fs);
+
+
+#ifdef DEBUG
+ printk("do_040writeback1, res=%d\n",res);
+#endif
+
+ return res;
+}
+
+/* After an exception in a writeback the stack frame corresponding
+ * to that exception is discarded; set a few bits in the old frame
+ * to simulate what it should look like.
+ */
+static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs)
+{
+ fp->un.fmt7.faddr = wba;
+ fp->un.fmt7.ssw = wbs & 0xff;
+ if (wba != current->thread.faddr)
+ fp->un.fmt7.ssw |= MA_040;
+}
+
+static inline void do_040writebacks(struct frame *fp)
+{
+ int res = 0;
+#if 0
+ if (fp->un.fmt7.wb1s & WBV_040)
+ printk("access_error040: cannot handle 1st writeback. oops.\n");
+#endif
+
+ if ((fp->un.fmt7.wb2s & WBV_040) &&
+ !(fp->un.fmt7.wb2s & WBTT_040)) {
+ res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a,
+ fp->un.fmt7.wb2d);
+ if (res)
+ fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s);
+ else
+ fp->un.fmt7.wb2s = 0;
+ }
+
+ /* do the 2nd wb only if the first one was successful (except for a kernel wb) */
+ if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) {
+ res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a,
+ fp->un.fmt7.wb3d);
+ if (res)
+ {
+ fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s);
+
+ fp->un.fmt7.wb2s = fp->un.fmt7.wb3s;
+ fp->un.fmt7.wb3s &= (~WBV_040);
+ fp->un.fmt7.wb2a = fp->un.fmt7.wb3a;
+ fp->un.fmt7.wb2d = fp->un.fmt7.wb3d;
+ }
+ else
+ fp->un.fmt7.wb3s = 0;
+ }
+
+ if (res)
+ send_fault_sig(&fp->ptregs);
+}
+
+/*
+ * called from sigreturn(); must ensure that userspace code didn't
+ * manipulate the exception frame to circumvent protection, then
+ * complete pending writebacks.
+ * we just clear TM2 to turn it into a userspace access
+ */
+asmlinkage void berr_040cleanup(struct frame *fp)
+{
+ fp->un.fmt7.wb2s &= ~4;
+ fp->un.fmt7.wb3s &= ~4;
+
+ do_040writebacks(fp);
+}
+
+static inline void access_error040(struct frame *fp)
+{
+ unsigned short ssw = fp->un.fmt7.ssw;
+ unsigned long mmusr;
+
+#ifdef DEBUG
+ printk("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
+ printk("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
+ fp->un.fmt7.wb2s, fp->un.fmt7.wb3s);
+ printk ("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
+ fp->un.fmt7.wb2a, fp->un.fmt7.wb3a,
+ fp->un.fmt7.wb2d, fp->un.fmt7.wb3d);
+#endif
+
+ if (ssw & ATC_040) {
+ unsigned long addr = fp->un.fmt7.faddr;
+ unsigned long errorcode;
+
+ /*
+ * The MMU status has to be determined AFTER the address
+ * has been corrected if there was a misaligned access (MA).
+ */
+ if (ssw & MA_040)
+ addr = (addr + 7) & -8;
+
+ /* MMU error, get the MMUSR info for this access */
+ mmusr = probe040(!(ssw & RW_040), addr, ssw);
+#ifdef DEBUG
+ printk("mmusr = %lx\n", mmusr);
+#endif
+ errorcode = 1;
+ if (!(mmusr & MMU_R_040)) {
+ /* clear the invalid atc entry */
+ __flush_tlb040_one(addr);
+ errorcode = 0;
+ }
+
+ /* despite what documentation seems to say, RMW
+		 * accesses always have both the LK and RW bits set */
+ if (!(ssw & RW_040) || (ssw & LK_040))
+ errorcode |= 2;
+
+ if (do_page_fault(&fp->ptregs, addr, errorcode)) {
+#ifdef DEBUG
+ printk("do_page_fault() !=0 \n");
+#endif
+ if (user_mode(&fp->ptregs)){
+ /* delay writebacks after signal delivery */
+#ifdef DEBUG
+ printk(".. was usermode - return\n");
+#endif
+ return;
+ }
+ /* disable writeback into user space from kernel
+ * (if do_page_fault didn't fix the mapping,
+			 * the writeback won't do any good)
+ */
+#ifdef DEBUG
+ printk(".. disabling wb2\n");
+#endif
+ if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
+ fp->un.fmt7.wb2s &= ~WBV_040;
+ }
+ } else if (send_fault_sig(&fp->ptregs) > 0) {
+ printk("68040 access error, ssw=%x\n", ssw);
+ trap_c(fp);
+ }
+
+ do_040writebacks(fp);
+}
+#endif /* CONFIG_M68040 */
+
+#if defined(CONFIG_SUN3)
+#include <asm/sun3mmu.h>
+
+extern int mmu_emu_handle_fault (unsigned long, int, int);
+
+/* sun3 version of bus_error030 */
+
+static inline void bus_error030 (struct frame *fp)
+{
+ unsigned char buserr_type = sun3_get_buserr ();
+ unsigned long addr, errorcode;
+ unsigned short ssw = fp->un.fmtb.ssw;
+ extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
+
+#ifdef DEBUG
+ if (ssw & (FC | FB))
+ printk ("Instruction fault at %#010lx\n",
+ ssw & FC ?
+ fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
+ :
+ fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
+ if (ssw & DF)
+ printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ ssw & RW ? "read" : "write",
+ fp->un.fmtb.daddr,
+ space_names[ssw & DFC], fp->ptregs.pc);
+#endif
+
+ /*
+ * Check if this page should be demand-mapped. This needs to go before
+ * the testing for a bad kernel-space access (demand-mapping applies
+ * to kernel accesses too).
+ */
+
+ if ((ssw & DF)
+ && (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) {
+ if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0))
+ return;
+ }
+
+ /* Check for kernel-space pagefault (BAD). */
+ if (fp->ptregs.sr & PS_S) {
+ /* kernel fault must be a data fault to user space */
+ if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) {
+ // try checking the kernel mappings before surrender
+ if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1))
+ return;
+ /* instruction fault or kernel data fault! */
+ if (ssw & (FC | FB))
+ printk ("Instruction fault at %#010lx\n",
+ fp->ptregs.pc);
+ if (ssw & DF) {
+ /* was this fault incurred testing bus mappings? */
+ if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) &&
+ (fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) {
+ send_fault_sig(&fp->ptregs);
+ return;
+ }
+
+ printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ ssw & RW ? "read" : "write",
+ fp->un.fmtb.daddr,
+ space_names[ssw & DFC], fp->ptregs.pc);
+ }
+ printk ("BAD KERNEL BUSERR\n");
+
+ die_if_kernel("Oops", &fp->ptregs,0);
+ force_sig(SIGKILL, current);
+ return;
+ }
+ } else {
+ /* user fault */
+ if (!(ssw & (FC | FB)) && !(ssw & DF))
+ /* not an instruction fault or data fault! BAD */
+ panic ("USER BUSERR w/o instruction or data fault");
+ }
+
+
+ /* First handle the data fault, if any. */
+ if (ssw & DF) {
+ addr = fp->un.fmtb.daddr;
+
+// errorcode bit 0: 0 -> no page 1 -> protection fault
+// errorcode bit 1: 0 -> read fault 1 -> write fault
+
+// (buserr_type & SUN3_BUSERR_PROTERR) -> protection fault
+// (buserr_type & SUN3_BUSERR_INVALID) -> invalid page fault
+
+ if (buserr_type & SUN3_BUSERR_PROTERR)
+ errorcode = 0x01;
+ else if (buserr_type & SUN3_BUSERR_INVALID)
+ errorcode = 0x00;
+ else {
+#ifdef DEBUG
+ printk ("*** unexpected busfault type=%#04x\n", buserr_type);
+ printk ("invalid %s access at %#lx from pc %#lx\n",
+ !(ssw & RW) ? "write" : "read", addr,
+ fp->ptregs.pc);
+#endif
+ die_if_kernel ("Oops", &fp->ptregs, buserr_type);
+ force_sig (SIGBUS, current);
+ return;
+ }
+
+//todo: wtf is RM bit? --m
+ if (!(ssw & RW) || ssw & RM)
+ errorcode |= 0x02;
+
+ /* Handle page fault. */
+ do_page_fault (&fp->ptregs, addr, errorcode);
+
+ /* Retry the data fault now. */
+ return;
+ }
+
+ /* Now handle the instruction fault. */
+
+ /* Get the fault address. */
+ if (fp->ptregs.format == 0xA)
+ addr = fp->ptregs.pc + 4;
+ else
+ addr = fp->un.fmtb.baddr;
+ if (ssw & FC)
+ addr -= 2;
+
+ if (buserr_type & SUN3_BUSERR_INVALID) {
+ if (!mmu_emu_handle_fault (fp->un.fmtb.daddr, 1, 0))
+ do_page_fault (&fp->ptregs, addr, 0);
+ } else {
+#ifdef DEBUG
+ printk ("protection fault on insn access (segv).\n");
+#endif
+ force_sig (SIGSEGV, current);
+ }
+}
+#else
+#if defined(CPU_M68020_OR_M68030)
+static inline void bus_error030 (struct frame *fp)
+{
+ volatile unsigned short temp;
+ unsigned short mmusr;
+ unsigned long addr, errorcode;
+ unsigned short ssw = fp->un.fmtb.ssw;
+#ifdef DEBUG
+ unsigned long desc;
+
+ printk ("pid = %x ", current->pid);
+ printk ("SSW=%#06x ", ssw);
+
+ if (ssw & (FC | FB))
+ printk ("Instruction fault at %#010lx\n",
+ ssw & FC ?
+ fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
+ :
+ fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
+ if (ssw & DF)
+ printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ ssw & RW ? "read" : "write",
+ fp->un.fmtb.daddr,
+ space_names[ssw & DFC], fp->ptregs.pc);
+#endif
+
+ /* ++andreas: If a data fault and an instruction fault happen
+ at the same time map in both pages. */
+
+ /* First handle the data fault, if any. */
+ if (ssw & DF) {
+ addr = fp->un.fmtb.daddr;
+
+#ifdef DEBUG
+ asm volatile ("ptestr %3,%2@,#7,%0\n\t"
+ "pmove %%psr,%1@"
+ : "=a&" (desc)
+ : "a" (&temp), "a" (addr), "d" (ssw));
+#else
+ asm volatile ("ptestr %2,%1@,#7\n\t"
+ "pmove %%psr,%0@"
+ : : "a" (&temp), "a" (addr), "d" (ssw));
+#endif
+ mmusr = temp;
+
+#ifdef DEBUG
+ printk("mmusr is %#x for addr %#lx in task %p\n",
+ mmusr, addr, current);
+ printk("descriptor address is %#lx, contents %#lx\n",
+ __va(desc), *(unsigned long *)__va(desc));
+#endif
+
+ errorcode = (mmusr & MMU_I) ? 0 : 1;
+ if (!(ssw & RW) || (ssw & RM))
+ errorcode |= 2;
+
+ if (mmusr & (MMU_I | MMU_WP)) {
+ if (ssw & 4) {
+ printk("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ ssw & RW ? "read" : "write",
+ fp->un.fmtb.daddr,
+ space_names[ssw & DFC], fp->ptregs.pc);
+ goto buserr;
+ }
+ /* Don't try to do anything further if an exception was
+ handled. */
+ if (do_page_fault (&fp->ptregs, addr, errorcode) < 0)
+ return;
+ } else if (!(mmusr & MMU_I)) {
+ /* probably a 020 cas fault */
+ if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
+ printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr);
+ } else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
+ printk("invalid %s access at %#lx from pc %#lx\n",
+ !(ssw & RW) ? "write" : "read", addr,
+ fp->ptregs.pc);
+ die_if_kernel("Oops",&fp->ptregs,mmusr);
+ force_sig(SIGSEGV, current);
+ return;
+ } else {
+#if 0
+ static volatile long tlong;
+#endif
+
+ printk("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
+ !(ssw & RW) ? "write" : "read", addr,
+ fp->ptregs.pc, ssw);
+ asm volatile ("ptestr #1,%1@,#0\n\t"
+ "pmove %%psr,%0@"
+ : /* no outputs */
+ : "a" (&temp), "a" (addr));
+ mmusr = temp;
+
+ printk ("level 0 mmusr is %#x\n", mmusr);
+#if 0
+ asm volatile ("pmove %%tt0,%0@"
+ : /* no outputs */
+ : "a" (&tlong));
+ printk("tt0 is %#lx, ", tlong);
+ asm volatile ("pmove %%tt1,%0@"
+ : /* no outputs */
+ : "a" (&tlong));
+ printk("tt1 is %#lx\n", tlong);
+#endif
+#ifdef DEBUG
+ printk("Unknown SIGSEGV - 1\n");
+#endif
+ die_if_kernel("Oops",&fp->ptregs,mmusr);
+ force_sig(SIGSEGV, current);
+ return;
+ }
+
+ /* setup an ATC entry for the access about to be retried */
+ if (!(ssw & RW) || (ssw & RM))
+ asm volatile ("ploadw %1,%0@" : /* no outputs */
+ : "a" (addr), "d" (ssw));
+ else
+ asm volatile ("ploadr %1,%0@" : /* no outputs */
+ : "a" (addr), "d" (ssw));
+ }
+
+ /* Now handle the instruction fault. */
+
+ if (!(ssw & (FC|FB)))
+ return;
+
+ if (fp->ptregs.sr & PS_S) {
+ printk("Instruction fault at %#010lx\n",
+ fp->ptregs.pc);
+ buserr:
+ printk ("BAD KERNEL BUSERR\n");
+ die_if_kernel("Oops",&fp->ptregs,0);
+ force_sig(SIGKILL, current);
+ return;
+ }
+
+ /* get the fault address */
+ if (fp->ptregs.format == 10)
+ addr = fp->ptregs.pc + 4;
+ else
+ addr = fp->un.fmtb.baddr;
+ if (ssw & FC)
+ addr -= 2;
+
+ if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0)
+ /* Insn fault on same page as data fault. But we
+ should still create the ATC entry. */
+ goto create_atc_entry;
+
+#ifdef DEBUG
+ asm volatile ("ptestr #1,%2@,#7,%0\n\t"
+ "pmove %%psr,%1@"
+ : "=a&" (desc)
+ : "a" (&temp), "a" (addr));
+#else
+ asm volatile ("ptestr #1,%1@,#7\n\t"
+ "pmove %%psr,%0@"
+ : : "a" (&temp), "a" (addr));
+#endif
+ mmusr = temp;
+
+#ifdef DEBUG
+ printk ("mmusr is %#x for addr %#lx in task %p\n",
+ mmusr, addr, current);
+ printk ("descriptor address is %#lx, contents %#lx\n",
+ __va(desc), *(unsigned long *)__va(desc));
+#endif
+
+ if (mmusr & MMU_I)
+ do_page_fault (&fp->ptregs, addr, 0);
+ else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
+ printk ("invalid insn access at %#lx from pc %#lx\n",
+ addr, fp->ptregs.pc);
+#ifdef DEBUG
+ printk("Unknown SIGSEGV - 2\n");
+#endif
+ die_if_kernel("Oops",&fp->ptregs,mmusr);
+ force_sig(SIGSEGV, current);
+ return;
+ }
+
+create_atc_entry:
+ /* setup an ATC entry for the access about to be retried */
+ asm volatile ("ploadr #2,%0@" : /* no outputs */
+ : "a" (addr));
+}
+#endif /* CPU_M68020_OR_M68030 */
+#endif /* !CONFIG_SUN3 */
+
+asmlinkage void buserr_c(struct frame *fp)
+{
+ /* Only set esp0 if coming from user mode */
+ if (user_mode(&fp->ptregs))
+ current->thread.esp0 = (unsigned long) fp;
+
+#ifdef DEBUG
+ printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
+#endif
+
+ switch (fp->ptregs.format) {
+#if defined (CONFIG_M68060)
+ case 4: /* 68060 access error */
+ access_error060 (fp);
+ break;
+#endif
+#if defined (CONFIG_M68040)
+ case 0x7: /* 68040 access error */
+ access_error040 (fp);
+ break;
+#endif
+#if defined (CPU_M68020_OR_M68030)
+ case 0xa:
+ case 0xb:
+ bus_error030 (fp);
+ break;
+#endif
+ default:
+ die_if_kernel("bad frame format",&fp->ptregs,0);
+#ifdef DEBUG
+ printk("Unknown SIGSEGV - 4\n");
+#endif
+ force_sig(SIGSEGV, current);
+ }
+}
+
+
+static int kstack_depth_to_print = 48;
+
+void show_trace(unsigned long *stack)
+{
+ unsigned long *endstack;
+ unsigned long addr;
+ int i;
+
+ printk("Call Trace:");
+ addr = (unsigned long)stack + THREAD_SIZE - 1;
+ endstack = (unsigned long *)(addr & -THREAD_SIZE);
+ i = 0;
+ while (stack + 1 <= endstack) {
+ addr = *stack++;
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+ if (__kernel_text_address(addr)) {
+#ifndef CONFIG_KALLSYMS
+ if (i % 5 == 0)
+ printk("\n ");
+#endif
+ printk(" [<%08lx>]", addr);
+ print_symbol(" %s\n", addr);
+ i++;
+ }
+ }
+ printk("\n");
+}
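show_trace() has no frame-pointer unwinder: it walks every word between the given stack pointer and the top of the THREAD_SIZE-aligned stack and prints the ones that fall inside kernel text, since any such word may be a saved return address (stale values get printed too). A standalone sketch of that heuristic, with an invented text range standing in for __kernel_text_address():

    #include <stdio.h>

    #define TEXT_START 0x00001000UL   /* invented bounds for the sketch */
    #define TEXT_END   0x00200000UL

    static int looks_like_text(unsigned long addr)
    {
            return addr >= TEXT_START && addr < TEXT_END;
    }

    int main(void)
    {
            /* a fake stack: only words inside [TEXT_START, TEXT_END) are kept */
            unsigned long stack[] = { 0x12345678UL, 0x00004e75UL, 0x0UL, 0x001a2b3cUL };
            size_t i;

            for (i = 0; i < sizeof(stack) / sizeof(stack[0]); i++)
                    if (looks_like_text(stack[i]))
                            printf(" [<%08lx>]\n", stack[i]);
            return 0;
    }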
+
+void show_registers(struct pt_regs *regs)
+{
+ struct frame *fp = (struct frame *)regs;
+ unsigned long addr;
+ int i;
+
+ addr = (unsigned long)&fp->un;
+ printk("Frame format=%X ", fp->ptregs.format);
+ switch (fp->ptregs.format) {
+ case 0x2:
+ printk("instr addr=%08lx\n", fp->un.fmt2.iaddr);
+ addr += sizeof(fp->un.fmt2);
+ break;
+ case 0x3:
+ printk("eff addr=%08lx\n", fp->un.fmt3.effaddr);
+ addr += sizeof(fp->un.fmt3);
+ break;
+ case 0x4:
+ printk((CPU_IS_060 ? "fault addr=%08lx fslw=%08lx\n"
+ : "eff addr=%08lx pc=%08lx\n"),
+ fp->un.fmt4.effaddr, fp->un.fmt4.pc);
+ addr += sizeof(fp->un.fmt4);
+ break;
+ case 0x7:
+ printk("eff addr=%08lx ssw=%04x faddr=%08lx\n",
+ fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
+ printk("wb 1 stat/addr/data: %04x %08lx %08lx\n",
+ fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
+ printk("wb 2 stat/addr/data: %04x %08lx %08lx\n",
+ fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
+ printk("wb 3 stat/addr/data: %04x %08lx %08lx\n",
+ fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
+ printk("push data: %08lx %08lx %08lx %08lx\n",
+ fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
+ fp->un.fmt7.pd3);
+ addr += sizeof(fp->un.fmt7);
+ break;
+ case 0x9:
+ printk("instr addr=%08lx\n", fp->un.fmt9.iaddr);
+ addr += sizeof(fp->un.fmt9);
+ break;
+ case 0xa:
+ printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
+ fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
+ fp->un.fmta.daddr, fp->un.fmta.dobuf);
+ addr += sizeof(fp->un.fmta);
+ break;
+ case 0xb:
+ printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
+ fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
+ fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
+ printk("baddr=%08lx dibuf=%08lx ver=%x\n",
+ fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
+ addr += sizeof(fp->un.fmtb);
+ break;
+ default:
+ printk("\n");
+ }
+ show_stack(NULL, (unsigned long *)addr);
+
+ printk("Code: ");
+ for (i = 0; i < 10; i++)
+ printk("%04x ", 0xffff & ((short *) fp->ptregs.pc)[i]);
+ printk ("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+ unsigned long *endstack;
+ int i;
+
+ if (!stack) {
+ if (task)
+ stack = (unsigned long *)task->thread.esp0;
+ else
+ stack = (unsigned long *)&stack;
+ }
+ endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
+
+ printk("Stack from %08lx:", (unsigned long)stack);
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (stack + 1 > endstack)
+ break;
+ if (i % 8 == 0)
+ printk("\n ");
+ printk(" %08lx", *stack++);
+ }
+ printk("\n");
+ show_trace(stack);
+}
+
+/*
+ * The architecture-independent backtrace generator
+ */
+void dump_stack(void)
+{
+ unsigned long stack;
+
+ show_trace(&stack);
+}
+
+EXPORT_SYMBOL(dump_stack);
+
+void bad_super_trap (struct frame *fp)
+{
+ console_verbose();
+ if (fp->ptregs.vector < 4*sizeof(vec_names)/sizeof(vec_names[0]))
+ printk ("*** %s *** FORMAT=%X\n",
+ vec_names[(fp->ptregs.vector) >> 2],
+ fp->ptregs.format);
+ else
+ printk ("*** Exception %d *** FORMAT=%X\n",
+ (fp->ptregs.vector) >> 2,
+ fp->ptregs.format);
+ if (fp->ptregs.vector >> 2 == VEC_ADDRERR && CPU_IS_020_OR_030) {
+ unsigned short ssw = fp->un.fmtb.ssw;
+
+ printk ("SSW=%#06x ", ssw);
+
+ if (ssw & RC)
+ printk ("Pipe stage C instruction fault at %#010lx\n",
+ (fp->ptregs.format) == 0xA ?
+ fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2);
+ if (ssw & RB)
+ printk ("Pipe stage B instruction fault at %#010lx\n",
+ (fp->ptregs.format) == 0xA ?
+ fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
+ if (ssw & DF)
+ printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+ ssw & RW ? "read" : "write",
+ fp->un.fmtb.daddr, space_names[ssw & DFC],
+ fp->ptregs.pc);
+ }
+ printk ("Current process id is %d\n", current->pid);
+ die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
+}
+
+asmlinkage void trap_c(struct frame *fp)
+{
+ int sig;
+ siginfo_t info;
+
+ if (fp->ptregs.sr & PS_S) {
+ if ((fp->ptregs.vector >> 2) == VEC_TRACE) {
+ /* traced a trapping instruction */
+ current->ptrace |= PT_DTRACE;
+ } else
+ bad_super_trap(fp);
+ return;
+ }
+
+ /* send the appropriate signal to the user program */
+ switch ((fp->ptregs.vector) >> 2) {
+ case VEC_ADDRERR:
+ info.si_code = BUS_ADRALN;
+ sig = SIGBUS;
+ break;
+ case VEC_ILLEGAL:
+ case VEC_LINE10:
+ case VEC_LINE11:
+ info.si_code = ILL_ILLOPC;
+ sig = SIGILL;
+ break;
+ case VEC_PRIV:
+ info.si_code = ILL_PRVOPC;
+ sig = SIGILL;
+ break;
+ case VEC_COPROC:
+ info.si_code = ILL_COPROC;
+ sig = SIGILL;
+ break;
+ case VEC_TRAP1:
+ case VEC_TRAP2:
+ case VEC_TRAP3:
+ case VEC_TRAP4:
+ case VEC_TRAP5:
+ case VEC_TRAP6:
+ case VEC_TRAP7:
+ case VEC_TRAP8:
+ case VEC_TRAP9:
+ case VEC_TRAP10:
+ case VEC_TRAP11:
+ case VEC_TRAP12:
+ case VEC_TRAP13:
+ case VEC_TRAP14:
+ info.si_code = ILL_ILLTRP;
+ sig = SIGILL;
+ break;
+ case VEC_FPBRUC:
+ case VEC_FPOE:
+ case VEC_FPNAN:
+ info.si_code = FPE_FLTINV;
+ sig = SIGFPE;
+ break;
+ case VEC_FPIR:
+ info.si_code = FPE_FLTRES;
+ sig = SIGFPE;
+ break;
+ case VEC_FPDIVZ:
+ info.si_code = FPE_FLTDIV;
+ sig = SIGFPE;
+ break;
+ case VEC_FPUNDER:
+ info.si_code = FPE_FLTUND;
+ sig = SIGFPE;
+ break;
+ case VEC_FPOVER:
+ info.si_code = FPE_FLTOVF;
+ sig = SIGFPE;
+ break;
+ case VEC_ZERODIV:
+ info.si_code = FPE_INTDIV;
+ sig = SIGFPE;
+ break;
+ case VEC_CHK:
+ case VEC_TRAP:
+ info.si_code = FPE_INTOVF;
+ sig = SIGFPE;
+ break;
+ case VEC_TRACE: /* ptrace single step */
+ info.si_code = TRAP_TRACE;
+ sig = SIGTRAP;
+ break;
+ case VEC_TRAP15: /* breakpoint */
+ info.si_code = TRAP_BRKPT;
+ sig = SIGTRAP;
+ break;
+ default:
+ info.si_code = ILL_ILLOPC;
+ sig = SIGILL;
+ break;
+ }
+ info.si_signo = sig;
+ info.si_errno = 0;
+ switch (fp->ptregs.format) {
+ default:
+ info.si_addr = (void *) fp->ptregs.pc;
+ break;
+ case 2:
+ info.si_addr = (void *) fp->un.fmt2.iaddr;
+ break;
+ case 7:
+ info.si_addr = (void *) fp->un.fmt7.effaddr;
+ break;
+ case 9:
+ info.si_addr = (void *) fp->un.fmt9.iaddr;
+ break;
+ case 10:
+ info.si_addr = (void *) fp->un.fmta.daddr;
+ break;
+ case 11:
+ info.si_addr = (void *) fp->un.fmtb.daddr;
+ break;
+ }
+ force_sig_info (sig, &info, current);
+}
+
+void die_if_kernel (char *str, struct pt_regs *fp, int nr)
+{
+ if (!(fp->sr & PS_S))
+ return;
+
+ console_verbose();
+ printk("%s: %08x\n",str,nr);
+ print_modules();
+ printk("PC: [<%08lx>]",fp->pc);
+ print_symbol(" %s\n", fp->pc);
+ printk("\nSR: %04x SP: %p a2: %08lx\n",
+ fp->sr, fp, fp->a2);
+ printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
+ fp->d0, fp->d1, fp->d2, fp->d3);
+ printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
+ fp->d4, fp->d5, fp->a0, fp->a1);
+
+ printk("Process %s (pid: %d, stackpage=%08lx)\n",
+ current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
+ show_stack(NULL, (unsigned long *)fp);
+ do_exit(SIGSEGV);
+}
+
+/*
+ * This function is called if an error occurs while accessing
+ * user-space from the fpsp040 code.
+ */
+asmlinkage void fpsp040_die(void)
+{
+ do_exit(SIGSEGV);
+}
+
+#ifdef CONFIG_M68KFPU_EMU
+asmlinkage void fpemu_signal(int signal, int code, void *addr)
+{
+ siginfo_t info;
+
+ info.si_signo = signal;
+ info.si_errno = 0;
+ info.si_code = code;
+ info.si_addr = addr;
+ force_sig_info(signal, &info, current);
+}
+#endif
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
new file mode 100644
index 00000000000..e58654f3f8d
--- /dev/null
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -0,0 +1,95 @@
+/* ld script to make m68k Linux kernel */
+
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf32-m68k", "elf32-m68k", "elf32-m68k")
+OUTPUT_ARCH(m68k)
+ENTRY(_start)
+jiffies = jiffies_64 + 4;
+SECTIONS
+{
+ . = 0x1000;
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.text)
+ SCHED_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } :text = 0x4e75
+
+ . = ALIGN(16); /* Exception table */
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
+
+ RODATA
+
+ _etext = .; /* End of text section */
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ }
+
+ .bss : { *(.bss) } /* BSS */
+
+ . = ALIGN(16);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) } :data
+
+ _edata = .; /* End of data section */
+
+ /* will be freed after init */
+ . = ALIGN(4096); /* Init code and data */
+ __init_begin = .;
+ .init.text : {
+ _sinittext = .;
+ *(.init.text)
+ _einittext = .;
+ }
+ .init.data : { *(.init.data) }
+ . = ALIGN(16);
+ __setup_start = .;
+ .init.setup : { *(.init.setup) }
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : {
+ *(.initcall1.init)
+ *(.initcall2.init)
+ *(.initcall3.init)
+ *(.initcall4.init)
+ *(.initcall5.init)
+ *(.initcall6.init)
+ *(.initcall7.init)
+ }
+ __initcall_end = .;
+ __con_initcall_start = .;
+ .con_initcall.init : { *(.con_initcall.init) }
+ __con_initcall_end = .;
+ SECURITY_INIT
+ . = ALIGN(8192);
+ __initramfs_start = .;
+ .init.ramfs : { *(.init.ramfs) }
+ __initramfs_end = .;
+ . = ALIGN(8192);
+ __init_end = .;
+
+ .data.init_task : { *(.data.init_task) } /* The initial task and kernel stack */
+
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.exit.text)
+ *(.exit.data)
+ *(.exitcall.exit)
+ }
+
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+}
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
new file mode 100644
index 00000000000..cc37e8d3c1e
--- /dev/null
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -0,0 +1,95 @@
+/* ld script to make m68k Linux kernel */
+
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_FORMAT("elf32-m68k", "elf32-m68k", "elf32-m68k")
+OUTPUT_ARCH(m68k)
+ENTRY(_start)
+jiffies = jiffies_64 + 4;
+SECTIONS
+{
+ . = 0xE004000;
+ _text = .; /* Text and read-only data */
+ .text : {
+ *(.head)
+ *(.text)
+ SCHED_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } :text = 0x4e75
+ RODATA
+
+ _etext = .; /* End of text section */
+
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+ . = ALIGN(16); /* Exception table */
+ __start___ex_table = .;
+ *(__ex_table)
+ __stop___ex_table = .;
+ } :data
+ /* End of data goes *here* so that freeing init code works properly. */
+ _edata = .;
+
+ /* will be freed after init */
+ . = ALIGN(8192); /* Init code and data */
+__init_begin = .;
+ .init.text : {
+ _sinittext = .;
+ *(.init.text)
+ _einittext = .;
+ }
+ .init.data : { *(.init.data) }
+ . = ALIGN(16);
+ __setup_start = .;
+ .init.setup : { *(.init.setup) }
+ __setup_end = .;
+ __initcall_start = .;
+ .initcall.init : {
+ *(.initcall1.init)
+ *(.initcall2.init)
+ *(.initcall3.init)
+ *(.initcall4.init)
+ *(.initcall5.init)
+ *(.initcall6.init)
+ *(.initcall7.init)
+ }
+ __initcall_end = .;
+ __con_initcall_start = .;
+ .con_initcall.init : { *(.con_initcall.init) }
+ __con_initcall_end = .;
+ SECURITY_INIT
+ . = ALIGN(8192);
+ __initramfs_start = .;
+ .init.ramfs : { *(.init.ramfs) }
+ __initramfs_end = .;
+ . = ALIGN(8192);
+ __init_end = .;
+ .init.task : { *(init_task) }
+
+
+ .bss : { *(.bss) } /* BSS */
+
+ _end = . ;
+
+ /* Sections to be discarded */
+ /DISCARD/ : {
+ *(.exit.text)
+ *(.exit.data)
+ *(.exitcall.exit)
+ }
+
+ .crap : {
+ /* Stabs debugging sections. */
+ *(.stab)
+ *(.stabstr)
+ *(.stab.excl)
+ *(.stab.exclstr)
+ *(.stab.index)
+ *(.stab.indexstr)
+ *(.comment)
+ *(.note)
+ }
+
+}
diff --git a/arch/m68k/kernel/vmlinux.lds.S b/arch/m68k/kernel/vmlinux.lds.S
new file mode 100644
index 00000000000..497b924f3c8
--- /dev/null
+++ b/arch/m68k/kernel/vmlinux.lds.S
@@ -0,0 +1,11 @@
+#include <linux/config.h>
+PHDRS
+{
+ text PT_LOAD FILEHDR PHDRS FLAGS (7);
+ data PT_LOAD FLAGS (7);
+}
+#ifdef CONFIG_SUN3
+#include "vmlinux-sun3.lds"
+#else
+#include "vmlinux-std.lds"
+#endif