author    Dmitry Torokhov <dmitry.torokhov@gmail.com>    2007-10-12 21:27:47 -0400
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>    2007-10-12 21:27:47 -0400
commit    b981d8b3f5e008ff10d993be633ad00564fc22cd (patch)
tree      e292dc07b22308912cf6a58354a608b9e5e8e1fd /include/asm-generic
parent    b11d2127c4893a7315d1e16273bc8560049fa3ca (diff)
parent    2b9e0aae1d50e880c58d46788e5e3ebd89d75d62 (diff)
Merge master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	drivers/macintosh/adbhid.c
Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/bug.h            |  6
-rw-r--r--  include/asm-generic/libata-portmap.h |  5
-rw-r--r--  include/asm-generic/percpu.h         |  8
-rw-r--r--  include/asm-generic/pgtable.h        | 73
-rw-r--r--  include/asm-generic/termios.h        |  2
-rw-r--r--  include/asm-generic/unaligned.h      | 12
-rw-r--r--  include/asm-generic/vmlinux.lds.h    | 14
7 files changed, 70 insertions(+), 50 deletions(-)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 344e3091af2..d56fedbb457 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -33,7 +33,7 @@ struct bug_entry {
 #ifndef HAVE_ARCH_WARN_ON
 #define WARN_ON(condition) ({					\
-	typeof(condition) __ret_warn_on = (condition);		\
+	int __ret_warn_on = !!(condition);			\
 	if (unlikely(__ret_warn_on)) {				\
 		printk("WARNING: at %s:%d %s()\n", __FILE__,	\
 			__LINE__, __FUNCTION__);		\
@@ -54,7 +54,7 @@ struct bug_entry {
 #ifndef HAVE_ARCH_WARN_ON
 #define WARN_ON(condition) ({					\
-	typeof(condition) __ret_warn_on = (condition);		\
+	int __ret_warn_on = !!(condition);			\
 	unlikely(__ret_warn_on);				\
 })
 #endif
@@ -62,7 +62,7 @@ struct bug_entry {
 #define WARN_ON_ONCE(condition) ({				\
 	static int __warned;					\
-	typeof(condition) __ret_warn_once = (condition);	\
+	int __ret_warn_once = !!(condition);			\
 								\
 	if (unlikely(__ret_warn_once))				\
 		if (WARN_ON(!__warned))				\
diff --git a/include/asm-generic/libata-portmap.h b/include/asm-generic/libata-portmap.h
index 62fb3618293..cf14f2ff40b 100644
--- a/include/asm-generic/libata-portmap.h
+++ b/include/asm-generic/libata-portmap.h
@@ -1,12 +1,7 @@
 #ifndef __ASM_GENERIC_LIBATA_PORTMAP_H
 #define __ASM_GENERIC_LIBATA_PORTMAP_H
 
-#define ATA_PRIMARY_CMD		0x1F0
-#define ATA_PRIMARY_CTL		0x3F6
 #define ATA_PRIMARY_IRQ(dev)	14
 
-#define ATA_SECONDARY_CMD	0x170
-#define ATA_SECONDARY_CTL	0x376
 #define ATA_SECONDARY_IRQ(dev)	15
 
 #endif
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index d984a904143..d85172e9ed4 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -14,6 +14,11 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define DEFINE_PER_CPU(type, name) \
     __attribute__((__section__(".data.percpu"))) __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)		\
+    __attribute__((__section__(".data.percpu.shared_aligned")))	\
+    __typeof__(type) per_cpu__##name				\
+    ____cacheline_aligned_in_smp
+
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({				\
 	extern int simple_identifier_##var(void);	\
@@ -34,6 +39,9 @@ do {				\
 #define DEFINE_PER_CPU(type, name) \
     __typeof__(type) per_cpu__##name
 
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name)	\
+    DEFINE_PER_CPU(type, name)
+
 #define per_cpu(var, cpu)			(*((void)(cpu), &per_cpu__##var))
 #define __get_cpu_var(var)			per_cpu__##var
 #define __raw_get_cpu_var(var)			per_cpu__##var
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f605e8d0eed..5f0d797d33f 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -2,6 +2,7 @@
 #define _ASM_GENERIC_PGTABLE_H
 
 #ifndef __ASSEMBLY__
+#ifdef CONFIG_MMU
 
 #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
 /*
@@ -133,41 +134,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #endif
 
 /*
- * A facility to provide lazy MMU batching.  This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued.  Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window.  Note that using this
- * interface requires that read hazards be removed from the code.  A read
- * hazard could result in the direct mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads though
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date.  This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified.  In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode()	do {} while (0)
-#define arch_leave_lazy_mmu_mode()	do {} while (0)
-#define arch_flush_lazy_mmu_mode()	do {} while (0)
-#endif
-
-/*
- * A facility to provide batching of the reload of page tables with the
- * actual context switch code for paravirtualized guests.  By convention,
- * only one of the lazy modes (CPU, MMU) should be active at any given
- * time, entry should never be nested, and entry and exits should always
- * be paired.  This is for sanity of maintaining and reasoning about the
- * kernel code.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-#define arch_enter_lazy_cpu_mode()	do {} while (0)
-#define arch_leave_lazy_cpu_mode()	do {} while (0)
-#define arch_flush_lazy_cpu_mode()	do {} while (0)
-#endif
-
-/*
  * When walking page tables, get the address of the next boundary,
  * or the end address of the range if that comes earlier.  Although no
  * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
@@ -233,6 +199,43 @@ static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 	}
 	return 0;
 }
+#endif /* CONFIG_MMU */
+
+/*
+ * A facility to provide lazy MMU batching.  This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued.  Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window.  Note that using this
+ * interface requires that read hazards be removed from the code.  A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads though
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date.  This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified.  In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode()	do {} while (0)
+#define arch_leave_lazy_mmu_mode()	do {} while (0)
+#define arch_flush_lazy_mmu_mode()	do {} while (0)
+#endif
+
+/*
+ * A facility to provide batching of the reload of page tables with the
+ * actual context switch code for paravirtualized guests.  By convention,
+ * only one of the lazy modes (CPU, MMU) should be active at any given
+ * time, entry should never be nested, and entry and exits should always
+ * be paired.  This is for sanity of maintaining and reasoning about the
+ * kernel code.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode()	do {} while (0)
+#define arch_leave_lazy_cpu_mode()	do {} while (0)
+#define arch_flush_lazy_cpu_mode()	do {} while (0)
+#endif
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_GENERIC_PGTABLE_H */
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h
index 3769e6bd63b..33dca30a3c4 100644
--- a/include/asm-generic/termios.h
+++ b/include/asm-generic/termios.h
@@ -63,6 +63,8 @@ static inline int kernel_termios_to_user_termio(struct termio __user *termio,
 
 #define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
 #define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
 
 #endif	/* __ARCH_TERMIO_GETPUT */
 
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
index 16a466e5068..2fe1b2e67f0 100644
--- a/include/asm-generic/unaligned.h
+++ b/include/asm-generic/unaligned.h
@@ -79,24 +79,24 @@ static inline void __ustw(__u16 val, __u16 *addr)
 
 #define __get_unaligned(ptr, size) ({		\
 	const void *__gu_p = ptr;		\
-	__u64 val;				\
+	__u64 __val;				\
 	switch (size) {				\
 	case 1:					\
-		val = *(const __u8 *)__gu_p;	\
+		__val = *(const __u8 *)__gu_p;	\
 		break;				\
 	case 2:					\
-		val = __uldw(__gu_p);		\
+		__val = __uldw(__gu_p);		\
 		break;				\
 	case 4:					\
-		val = __uldl(__gu_p);		\
+		__val = __uldl(__gu_p);		\
 		break;				\
 	case 8:					\
-		val = __uldq(__gu_p);		\
+		__val = __uldq(__gu_p);		\
 		break;				\
 	default:				\
 		bad_unaligned_access_length();	\
 	};					\
-	(__force __typeof__(*(ptr)))val;	\
+	(__force __typeof__(*(ptr)))__val;	\
 })
 
 #define __put_unaligned(val, ptr, size)		\
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 84155eb67f1..0240e0506a0 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -224,7 +224,11 @@
 	}
 
 #define NOTES						\
-	.notes : { *(.note.*) } :note
+	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {	\
+		VMLINUX_SYMBOL(__start_notes) = .;	\
+		*(.note.*)				\
+		VMLINUX_SYMBOL(__stop_notes) = .;	\
+	}
 
 #define INITCALLS					\
 	*(.initcall0.init)				\
@@ -245,3 +249,11 @@
 	*(.initcall7.init)				\
 	*(.initcall7s.init)
 
+#define PERCPU(align)						\
+	. = ALIGN(align);					\
+	__per_cpu_start = .;					\
+	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) {	\
+		*(.data.percpu)					\
+		*(.data.percpu.shared_aligned)			\
+	}							\
+	__per_cpu_end = .;
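
Note on the bug.h hunks: replacing typeof(condition) with a plain int normalized
via !! means the macro's local variable has one fixed type no matter what the
caller passes (pointer, 64-bit value, enum), and the statement expression always
yields 0 or 1. A minimal user-space sketch of the idea; my_warn_on is a
hypothetical stand-in, not the kernel macro:

#include <stdio.h>

/* GNU C statement expression, as in the kernel header. The !! collapses
 * any scalar condition to a 0/1 int, so __ret_warn_on is always int. */
#define my_warn_on(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	if (__ret_warn_on)					\
		fprintf(stderr, "WARNING: at %s:%d\n",		\
			__FILE__, __LINE__);			\
	__ret_warn_on;						\
})

int main(void)
{
	const char *p = "x";
	unsigned long long big = 1ULL << 40;

	/* With the old typeof() form, the local would have been a
	 * const char * and an unsigned long long here respectively;
	 * with !! both conditions collapse to int 1. */
	if (my_warn_on(p))
		puts("pointer condition -> 1");
	if (my_warn_on(big))
		puts("64-bit condition -> 1");
	return 0;
}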
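Note on the percpu.h hunks: DEFINE_PER_CPU_SHARED_ALIGNED pads hot per-CPU data
out to a cache line (via ____cacheline_aligned_in_smp) so writers on different
CPUs do not false-share one line. A user-space sketch of the alignment effect;
the 64-byte line size is an assumption here, the kernel takes the real value
from the architecture:

#include <stdio.h>

#define CACHE_LINE 64	/* assumed line size for the sketch */

struct hot_counter {
	unsigned long count;
} __attribute__((aligned(CACHE_LINE)));

static struct hot_counter counters[4];	/* one slot per "CPU" */

int main(void)
{
	/* aligned(64) rounds sizeof up to 64, so consecutive array
	 * elements start on distinct cache lines. */
	printf("element size: %zu bytes\n", sizeof(struct hot_counter));
	printf("cpu0 at %p, cpu1 at %p\n",
	       (void *)&counters[0], (void *)&counters[1]);
	return 0;
}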
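Note on the unaligned.h hunks: renaming val to __val inside __get_unaligned()
is macro hygiene. If the caller's own variable is named val, the macro's local
declaration shadows it, and the pointer expression ends up reading the wrong
object. A reduced illustration; bad_get/good_get are hypothetical, not the
kernel macros:

#include <stdio.h>

/* bad_get(&val) expands to ({ int val = *(&val); val; }): the inner
 * declaration is already in scope in its own initializer, so *(&val)
 * reads the uninitialized inner copy, not the caller's variable. */
#define bad_get(ptr) ({			\
	int val = *(ptr);		\
	val;				\
})

/* good_get avoids the collision: callers are very unlikely to use the
 * reserved-style name __val themselves. */
#define good_get(ptr) ({		\
	int __val = *(ptr);		\
	__val;				\
})

int main(void)
{
	int val = 42;

	printf("good_get(&val) = %d\n", good_get(&val));	/* 42 */
	return 0;
}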
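Note on the vmlinux.lds.h NOTES change: bracketing the .notes output section
with __start_notes/__stop_notes lets kernel code walk the ELF notes as an
array. GNU ld provides the same __start_<sec>/__stop_<sec> bracketing
automatically for any section whose name is a valid C identifier, which the
sketch below relies on; mysec is an arbitrary example name:

#include <stdio.h>

/* Two objects collected by the linker into one output section. The
 * "used" attribute keeps the compiler from discarding them. */
static const int a __attribute__((section("mysec"), used)) = 1;
static const int b __attribute__((section("mysec"), used)) = 2;

/* Symbols emitted by GNU ld at the section's start and end. */
extern const int __start_mysec[], __stop_mysec[];

int main(void)
{
	/* Walk everything the linker placed in the section, exactly the
	 * pattern the __start_notes/__stop_notes symbols enable. */
	for (const int *p = __start_mysec; p < __stop_mysec; p++)
		printf("mysec entry: %d\n", *p);
	return 0;
}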