Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig            |    9
-rw-r--r--  lib/Kconfig.debug      |  102
-rw-r--r--  lib/Makefile           |   10
-rw-r--r--  lib/bitrev.c           |   58
-rw-r--r--  lib/bug.c              |  163
-rw-r--r--  lib/crc32.c            |   28
-rw-r--r--  lib/fault-inject.c     |  306
-rw-r--r--  lib/iomap.c            |  299
-rw-r--r--  lib/ioremap.c          |    1
-rw-r--r--  lib/kobject.c          |   73
-rw-r--r--  lib/kobject_uevent.c   |   44
-rw-r--r--  lib/kref.c             |    7
-rw-r--r--  lib/reciprocal_div.c   |    9
-rw-r--r--  lib/swiotlb.c          |  290
14 files changed, 1204 insertions, 195 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 734ce95a93d..9b03581cdec 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -4,6 +4,9 @@
menu "Library routines"
+config BITREVERSE
+ tristate
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
@@ -23,6 +26,7 @@ config CRC16
config CRC32
tristate "CRC32 functions"
default y
+ select BITREVERSE
help
This option is provided for the case where no in-kernel-tree
modules require CRC32 functions, but a module built outside the
@@ -97,4 +101,9 @@ config TEXTSEARCH_FSM
config PLIST
boolean
+config IOMAP_COPY
+ boolean
+ depends on !UML
+ default y
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b75fed737f2..5c2681875b9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -47,6 +47,30 @@ config UNUSED_SYMBOLS
you really need it, and what the merge plan to the mainline kernel for
your module is.
+config DEBUG_FS
+ bool "Debug Filesystem"
+ depends on SYSFS
+ help
+ debugfs is a virtual file system that kernel developers use to put
+ debugging files into. Enable this option to be able to read and
+ write to these files.
+
+ If unsure, say N.
+
+config HEADERS_CHECK
+ bool "Run 'make headers_check' when building vmlinux"
+ depends on !UML
+ help
+ This option will extract the user-visible kernel headers whenever
+ building the kernel, and will run basic sanity checks on them to
+ ensure that exported files do not attempt to include files which
+ were not exported, etc.
+
+ If you're making modifications to header files which are
+ relevant for userspace, say 'Y', and check the headers
+ exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
+ your build tree), to make sure they're suitable.
+
config DEBUG_KERNEL
bool "Kernel debugging"
help
@@ -285,7 +309,7 @@ config DEBUG_HIGHMEM
config DEBUG_BUGVERBOSE
bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED
depends on BUG
- depends on ARM || ARM26 || AVR32 || M32R || M68K || SPARC32 || SPARC64 || X86_32 || FRV || SUPERH
+ depends on ARM || ARM26 || AVR32 || M32R || M68K || SPARC32 || SPARC64 || FRV || SUPERH || GENERIC_BUG
default !EMBEDDED
help
Say Y here to make BUG() panics output the file name and line number
@@ -302,16 +326,6 @@ config DEBUG_INFO
If unsure, say N.
-config DEBUG_FS
- bool "Debug Filesystem"
- depends on SYSFS
- help
- debugfs is a virtual file system that kernel developers use to put
- debugging files into. Enable this option to be able to read and
- write to these files.
-
- If unsure, say N.
-
config DEBUG_VM
bool "Debug VM"
depends on DEBUG_KERNEL
@@ -340,24 +354,6 @@ config FRAME_POINTER
some architectures or if you use external debuggers.
If you don't debug the kernel, you can say N.
-config UNWIND_INFO
- bool "Compile the kernel with frame unwind information"
- depends on !IA64 && !PARISC && !ARM
- depends on !MODULES || !(MIPS || PPC || SUPERH || V850)
- help
- If you say Y here the resulting kernel image will be slightly larger
- but not slower, and it will give very useful debugging information.
- If you don't debug the kernel, you can say N, but we may not be able
- to solve problems without frame unwind information or frame pointers.
-
-config STACK_UNWIND
- bool "Stack unwind support"
- depends on UNWIND_INFO
- depends on X86
- help
- This enables more precise stack traces, omitting all unrelated
- occurrences of pointers into kernel code from the dump.
-
config FORCED_INLINING
bool "Force gcc to inline functions marked 'inline'"
depends on DEBUG_KERNEL
@@ -372,20 +368,6 @@ config FORCED_INLINING
become the default in the future, until then this option is there to
test gcc for this.
-config HEADERS_CHECK
- bool "Run 'make headers_check' when building vmlinux"
- depends on !UML
- help
- This option will extract the user-visible kernel headers whenever
- building the kernel, and will run basic sanity checks on them to
- ensure that exported files do not attempt to include files which
- were not exported, etc.
-
- If you're making modifications to header files which are
- relevant for userspace, say 'Y', and check the headers
- exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
- your build tree), to make sure they're suitable.
-
config RCU_TORTURE_TEST
tristate "torture tests for RCU"
depends on DEBUG_KERNEL
@@ -402,6 +384,7 @@ config RCU_TORTURE_TEST
config LKDTM
tristate "Linux Kernel Dump Test Tool Module"
+ depends on DEBUG_KERNEL
depends on KPROBES
default n
help
@@ -413,3 +396,36 @@ config LKDTM
Documentation on how to use the module can be found in
drivers/misc/lkdtm.c
+
+config FAULT_INJECTION
+ bool "Fault-injection framework"
+ depends on DEBUG_KERNEL
+ depends on STACKTRACE
+ select FRAME_POINTER
+ help
+ Provide fault-injection framework.
+ For more details, see Documentation/fault-injection/.
+
+config FAILSLAB
+ bool "Fault-injection capability for kmalloc"
+ depends on FAULT_INJECTION
+ help
+ Provide fault-injection capability for kmalloc.
+
+config FAIL_PAGE_ALLOC
+ bool "Fault-injection capabilitiy for alloc_pages()"
+ depends on FAULT_INJECTION
+ help
+ Provide fault-injection capability for alloc_pages().
+
+config FAIL_MAKE_REQUEST
+ bool "Fault-injection capability for disk IO"
+ depends on FAULT_INJECTION
+ help
+ Provide fault-injection capability for disk IO.
+
+config FAULT_INJECTION_DEBUG_FS
+ bool "Debugfs entries for fault-injection capabilities"
+ depends on FAULT_INJECTION && SYSFS && DEBUG_FS
+ help
+ Enable configuration of fault-injection capabilities via debugfs.
diff --git a/lib/Makefile b/lib/Makefile
index fea8f9035f0..3b605da448f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -5,20 +5,21 @@
lib-y := ctype.o string.o vsprintf.o cmdline.o \
bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
- sha1.o irq_regs.o
+ sha1.o irq_regs.o reciprocal_div.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o kref.o kobject_uevent.o klist.o
-obj-y += sort.o parser.o halfmd4.o iomap_copy.o debug_locks.o random32.o
+obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o iomap.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
CFLAGS_kobject_uevent.o += -DDEBUG
endif
+obj-$(CONFIG_IOMAP_COPY) += iomap_copy.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
@@ -35,11 +36,11 @@ ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
lib-y += dec_and_lock.o
endif
+obj-$(CONFIG_BITREVERSE) += bitrev.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
obj-$(CONFIG_CRC16) += crc16.o
obj-$(CONFIG_CRC32) += crc32.o
obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
-obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
@@ -54,6 +55,9 @@ obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
+obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+
+lib-$(CONFIG_GENERIC_BUG) += bug.o
hostprogs-y := gen_crc32table
clean-files := crc32table.h
diff --git a/lib/bitrev.c b/lib/bitrev.c
new file mode 100644
index 00000000000..989aff73f88
--- /dev/null
+++ b/lib/bitrev.c
@@ -0,0 +1,58 @@
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/bitrev.h>
+
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_DESCRIPTION("Bit ordering reversal functions");
+MODULE_LICENSE("GPL");
+
+const u8 byte_rev_table[256] = {
+ 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
+ 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
+ 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
+ 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
+ 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
+ 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
+ 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
+ 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
+ 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
+ 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
+ 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
+ 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
+ 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
+ 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
+ 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
+ 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
+ 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
+ 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
+ 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
+ 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
+ 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
+ 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+ 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
+ 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
+ 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
+ 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
+ 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
+ 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
+ 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
+ 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
+ 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
+ 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
+};
+EXPORT_SYMBOL_GPL(byte_rev_table);
+
+static __always_inline u16 bitrev16(u16 x)
+{
+ return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8);
+}
+
+/**
+ * bitrev32 - reverse the order of bits in a u32 value
+ * @x: value to be bit-reversed
+ */
+u32 bitrev32(u32 x)
+{
+ return (bitrev16(x & 0xffff) << 16) | bitrev16(x >> 16);
+}
+EXPORT_SYMBOL(bitrev32);
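
The table above covers single bytes; bitrev8() itself is presumably just a lookup into byte_rev_table, which is how <linux/bitrev.h> is expected to define it, with bitrev16()/bitrev32() composed on top as shown. A minimal sketch of that assumed helper:

/* Hedged sketch: bitrev8() assumed to be the byte_rev_table lookup
 * provided by <linux/bitrev.h> (not part of this patch). */
static inline u8 bitrev8(u8 byte)
{
        return byte_rev_table[byte];
}

/* e.g. bitrev8(0x01) == 0x80 and bitrev32(0x00000001) == 0x80000000 */
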
diff --git a/lib/bug.c b/lib/bug.c
new file mode 100644
index 00000000000..014b582c5c4
--- /dev/null
+++ b/lib/bug.c
@@ -0,0 +1,163 @@
+/*
+ Generic support for BUG()
+
+ This respects the following config options:
+
+ CONFIG_BUG - emit BUG traps. Nothing happens without this.
+ CONFIG_GENERIC_BUG - enable this code.
+ CONFIG_DEBUG_BUGVERBOSE - emit full file+line information for each BUG
+
+ CONFIG_BUG and CONFIG_DEBUG_BUGVERBOSE are potentially user-settable
+ (though they're generally always on).
+
+ CONFIG_GENERIC_BUG is set by each architecture using this code.
+
+ To use this, your architecture must:
+
+ 1. Set up the config options:
+ - Enable CONFIG_GENERIC_BUG if CONFIG_BUG
+
+ 2. Implement BUG (and optionally BUG_ON, WARN, WARN_ON)
+ - Define HAVE_ARCH_BUG
+ - Implement BUG() to generate a faulting instruction
+ - NOTE: struct bug_entry does not have "file" or "line" entries
+ when CONFIG_DEBUG_BUGVERBOSE is not enabled, so you must generate
+ the values accordingly.
+
+ 3. Implement the trap
+ - In the illegal instruction trap handler (typically), verify
+ that the fault was in kernel mode, and call report_bug()
+ - report_bug() will return whether it was a false alarm, a warning,
+ or an actual bug.
+ - You must implement the is_valid_bugaddr(bugaddr) callback which
+ returns true if the eip is a real kernel address, and it points
+ to the expected BUG trap instruction.
+
+ Jeremy Fitzhardinge <jeremy@goop.org> 2006
+ */
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/bug.h>
+
+extern const struct bug_entry __start___bug_table[], __stop___bug_table[];
+
+#ifdef CONFIG_MODULES
+static LIST_HEAD(module_bug_list);
+
+static const struct bug_entry *module_find_bug(unsigned long bugaddr)
+{
+ struct module *mod;
+
+ list_for_each_entry(mod, &module_bug_list, bug_list) {
+ const struct bug_entry *bug = mod->bug_table;
+ unsigned i;
+
+ for (i = 0; i < mod->num_bugs; ++i, ++bug)
+ if (bugaddr == bug->bug_addr)
+ return bug;
+ }
+ return NULL;
+}
+
+int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *mod)
+{
+ char *secstrings;
+ unsigned int i;
+
+ mod->bug_table = NULL;
+ mod->num_bugs = 0;
+
+ /* Find the __bug_table section, if present */
+ secstrings = (char *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if (strcmp(secstrings+sechdrs[i].sh_name, "__bug_table"))
+ continue;
+ mod->bug_table = (void *) sechdrs[i].sh_addr;
+ mod->num_bugs = sechdrs[i].sh_size / sizeof(struct bug_entry);
+ break;
+ }
+
+ /*
+ * Strictly speaking this should have a spinlock to protect against
+ * traversals, but since we only traverse on BUG()s, a spinlock
+ * could potentially lead to deadlock and thus be counter-productive.
+ */
+ list_add(&mod->bug_list, &module_bug_list);
+
+ return 0;
+}
+
+void module_bug_cleanup(struct module *mod)
+{
+ list_del(&mod->bug_list);
+}
+
+#else
+
+static inline const struct bug_entry *module_find_bug(unsigned long bugaddr)
+{
+ return NULL;
+}
+#endif
+
+const struct bug_entry *find_bug(unsigned long bugaddr)
+{
+ const struct bug_entry *bug;
+
+ for (bug = __start___bug_table; bug < __stop___bug_table; ++bug)
+ if (bugaddr == bug->bug_addr)
+ return bug;
+
+ return module_find_bug(bugaddr);
+}
+
+enum bug_trap_type report_bug(unsigned long bugaddr)
+{
+ const struct bug_entry *bug;
+ const char *file;
+ unsigned line, warning;
+
+ if (!is_valid_bugaddr(bugaddr))
+ return BUG_TRAP_TYPE_NONE;
+
+ bug = find_bug(bugaddr);
+
+ printk(KERN_EMERG "------------[ cut here ]------------\n");
+
+ file = NULL;
+ line = 0;
+ warning = 0;
+
+ if (bug) {
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+ file = bug->file;
+ line = bug->line;
+#endif
+ warning = (bug->flags & BUGFLAG_WARNING) != 0;
+ }
+
+ if (warning) {
+ /* this is a WARN_ON rather than BUG/BUG_ON */
+ if (file)
+ printk(KERN_ERR "Badness at %s:%u\n",
+ file, line);
+ else
+ printk(KERN_ERR "Badness at %p "
+ "[verbose debug info unavailable]\n",
+ (void *)bugaddr);
+
+ dump_stack();
+ return BUG_TRAP_TYPE_WARN;
+ }
+
+ if (file)
+ printk(KERN_CRIT "kernel BUG at %s:%u!\n",
+ file, line);
+ else
+ printk(KERN_CRIT "Kernel BUG at %p "
+ "[verbose debug info unavailable]\n",
+ (void *)bugaddr);
+
+ return BUG_TRAP_TYPE_BUG;
+}
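
To illustrate step 3 of the header comment, a hedged sketch of how an architecture's invalid-opcode trap handler might hook report_bug(). The handler name, regs->ip, die(), BUG_TRAP_OPCODE and BUG_INSN_SIZE are illustrative assumptions, not any real architecture's code.

#include <linux/bug.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>

/* Hedged sketch of an arch hooking report_bug(); names are made up. */
int is_valid_bugaddr(unsigned long addr)
{
        unsigned short insn;

        if (addr < PAGE_OFFSET)                 /* must be a kernel address */
                return 0;
        if (probe_kernel_address((void *)addr, insn))
                return 0;                       /* not mapped */
        return insn == BUG_TRAP_OPCODE;         /* arch's trapping instruction */
}

void do_invalid_op(struct pt_regs *regs)
{
        if (!user_mode(regs) &&
            report_bug(regs->ip) == BUG_TRAP_TYPE_WARN) {
                regs->ip += BUG_INSN_SIZE;      /* WARN_ON: skip trap, continue */
                return;
        }
        die("invalid opcode", regs);            /* genuine BUG() or unknown trap */
}
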
diff --git a/lib/crc32.c b/lib/crc32.c
index 285fd9bc61b..bfc33314c72 100644
--- a/lib/crc32.c
+++ b/lib/crc32.c
@@ -235,23 +235,8 @@ u32 __attribute_pure__ crc32_be(u32 crc, unsigned char const *p, size_t len)
}
#endif
-/**
- * bitreverse - reverse the order of bits in a u32 value
- * @x: value to be bit-reversed
- */
-u32 bitreverse(u32 x)
-{
- x = (x >> 16) | (x << 16);
- x = (x >> 8 & 0x00ff00ff) | (x << 8 & 0xff00ff00);
- x = (x >> 4 & 0x0f0f0f0f) | (x << 4 & 0xf0f0f0f0);
- x = (x >> 2 & 0x33333333) | (x << 2 & 0xcccccccc);
- x = (x >> 1 & 0x55555555) | (x << 1 & 0xaaaaaaaa);
- return x;
-}
-
EXPORT_SYMBOL(crc32_le);
EXPORT_SYMBOL(crc32_be);
-EXPORT_SYMBOL(bitreverse);
/*
* A brief CRC tutorial.
@@ -400,10 +385,7 @@ buf_dump(char const *prefix, unsigned char const *buf, size_t len)
static void bytereverse(unsigned char *buf, size_t len)
{
while (len--) {
- unsigned char x = *buf;
- x = (x >> 4) | (x << 4);
- x = (x >> 2 & 0x33) | (x << 2 & 0xcc);
- x = (x >> 1 & 0x55) | (x << 1 & 0xaa);
+ unsigned char x = bitrev8(*buf);
*buf++ = x;
}
}
@@ -460,11 +442,11 @@ static u32 test_step(u32 init, unsigned char *buf, size_t len)
/* Now swap it around for the other test */
bytereverse(buf, len + 4);
- init = bitreverse(init);
- crc2 = bitreverse(crc1);
- if (crc1 != bitreverse(crc2))
+ init = bitrev32(init);
+ crc2 = bitrev32(crc1);
+ if (crc1 != bitrev32(crc2))
printf("\nBit reversal fail: 0x%08x -> 0x%08x -> 0x%08x\n",
- crc1, crc2, bitreverse(crc2));
+ crc1, crc2, bitrev32(crc2));
crc1 = crc32_le(init, buf, len);
if (crc1 != crc2)
printf("\nCRC endianness fail: 0x%08x != 0x%08x\n", crc1,
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
new file mode 100644
index 00000000000..b5a90fc056d
--- /dev/null
+++ b/lib/fault-inject.c
@@ -0,0 +1,306 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/unwind.h>
+#include <linux/stacktrace.h>
+#include <linux/kallsyms.h>
+#include <linux/fault-inject.h>
+
+/*
+ * setup_fault_attr() is a helper function for various __setup handlers, so it
+ * returns 0 on error, because that is what __setup handlers do.
+ */
+int __init setup_fault_attr(struct fault_attr *attr, char *str)
+{
+ unsigned long probability;
+ unsigned long interval;
+ int times;
+ int space;
+
+ /* "<interval>,<probability>,<space>,<times>" */
+ if (sscanf(str, "%lu,%lu,%d,%d",
+ &interval, &probability, &space, &times) < 4) {
+ printk(KERN_WARNING
+ "FAULT_INJECTION: failed to parse arguments\n");
+ return 0;
+ }
+
+ attr->probability = probability;
+ attr->interval = interval;
+ atomic_set(&attr->times, times);
+ atomic_set(&attr->space, space);
+
+ return 1;
+}
+
+static void fail_dump(struct fault_attr *attr)
+{
+ if (attr->verbose > 0)
+ printk(KERN_NOTICE "FAULT_INJECTION: forcing a failure\n");
+ if (attr->verbose > 1)
+ dump_stack();
+}
+
+#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
+
+static bool fail_task(struct fault_attr *attr, struct task_struct *task)
+{
+ return !in_interrupt() && task->make_it_fail;
+}
+
+#define MAX_STACK_TRACE_DEPTH 32
+
+#if defined(CONFIG_STACKTRACE)
+
+static bool fail_stacktrace(struct fault_attr *attr)
+{
+ struct stack_trace trace;
+ int depth = attr->stacktrace_depth;
+ unsigned long entries[MAX_STACK_TRACE_DEPTH];
+ int n;
+ bool found = (attr->require_start == 0 && attr->require_end == ULONG_MAX);
+
+ if (depth == 0)
+ return found;
+
+ trace.nr_entries = 0;
+ trace.entries = entries;
+ trace.max_entries = depth;
+ trace.skip = 1;
+ trace.all_contexts = 0;
+
+ save_stack_trace(&trace, NULL);
+ for (n = 0; n < trace.nr_entries; n++) {
+ if (attr->reject_start <= entries[n] &&
+ entries[n] < attr->reject_end)
+ return false;
+ if (attr->require_start <= entries[n] &&
+ entries[n] < attr->require_end)
+ found = true;
+ }
+ return found;
+}
+
+#else
+
+static inline bool fail_stacktrace(struct fault_attr *attr)
+{
+ static bool firsttime = true;
+
+ if (firsttime) {
+ printk(KERN_WARNING
+ "This architecture does not implement save_stack_trace()\n");
+ firsttime = false;
+ }
+ return false;
+}
+
+#endif
+
+/*
+ * This code is stolen from failmalloc-1.0
+ * http://www.nongnu.org/failmalloc/
+ */
+
+bool should_fail(struct fault_attr *attr, ssize_t size)
+{
+ if (attr->task_filter && !fail_task(attr, current))
+ return false;
+
+ if (atomic_read(&attr->times) == 0)
+ return false;
+
+ if (atomic_read(&attr->space) > size) {
+ atomic_sub(size, &attr->space);
+ return false;
+ }
+
+ if (attr->interval > 1) {
+ attr->count++;
+ if (attr->count % attr->interval)
+ return false;
+ }
+
+ if (attr->probability <= random32() % 100)
+ return false;
+
+ if (!fail_stacktrace(attr))
+ return false;
+
+ fail_dump(attr);
+
+ if (atomic_read(&attr->times) != -1)
+ atomic_dec_not_zero(&attr->times);
+
+ return true;
+}
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static void debugfs_ul_set(void *data, u64 val)
+{
+ *(unsigned long *)data = val;
+}
+
+static void debugfs_ul_set_MAX_STACK_TRACE_DEPTH(void *data, u64 val)
+{
+ *(unsigned long *)data =
+ val < MAX_STACK_TRACE_DEPTH ?
+ val : MAX_STACK_TRACE_DEPTH;
+}
+
+static u64 debugfs_ul_get(void *data)
+{
+ return *(unsigned long *)data;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_ul, debugfs_ul_get, debugfs_ul_set, "%llu\n");
+
+static struct dentry *debugfs_create_ul(const char *name, mode_t mode,
+ struct dentry *parent, unsigned long *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &fops_ul);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_ul_MAX_STACK_TRACE_DEPTH, debugfs_ul_get,
+ debugfs_ul_set_MAX_STACK_TRACE_DEPTH, "%llu\n");
+
+static struct dentry *debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
+ const char *name, mode_t mode,
+ struct dentry *parent, unsigned long *value)
+{
+ return debugfs_create_file(name, mode, parent, value,
+ &fops_ul_MAX_STACK_TRACE_DEPTH);
+}
+
+static void debugfs_atomic_t_set(void *data, u64 val)
+{
+ atomic_set((atomic_t *)data, val);
+}
+
+static u64 debugfs_atomic_t_get(void *data)
+{
+ return atomic_read((atomic_t *)data);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get,
+ debugfs_atomic_t_set, "%lld\n");
+
+static struct dentry *debugfs_create_atomic_t(const char *name, mode_t mode,
+ struct dentry *parent, atomic_t *value)
+{
+ return debugfs_create_file(name, mode, parent, value, &fops_atomic_t);
+}
+
+void cleanup_fault_attr_dentries(struct fault_attr *attr)
+{
+ debugfs_remove(attr->dentries.probability_file);
+ attr->dentries.probability_file = NULL;
+
+ debugfs_remove(attr->dentries.interval_file);
+ attr->dentries.interval_file = NULL;
+
+ debugfs_remove(attr->dentries.times_file);
+ attr->dentries.times_file = NULL;
+
+ debugfs_remove(attr->dentries.space_file);
+ attr->dentries.space_file = NULL;
+
+ debugfs_remove(attr->dentries.verbose_file);
+ attr->dentries.verbose_file = NULL;
+
+ debugfs_remove(attr->dentries.task_filter_file);
+ attr->dentries.task_filter_file = NULL;
+
+ debugfs_remove(attr->dentries.stacktrace_depth_file);
+ attr->dentries.stacktrace_depth_file = NULL;
+
+ debugfs_remove(attr->dentries.require_start_file);
+ attr->dentries.require_start_file = NULL;
+
+ debugfs_remove(attr->dentries.require_end_file);
+ attr->dentries.require_end_file = NULL;
+
+ debugfs_remove(attr->dentries.reject_start_file);
+ attr->dentries.reject_start_file = NULL;
+
+ debugfs_remove(attr->dentries.reject_end_file);
+ attr->dentries.reject_end_file = NULL;
+
+ if (attr->dentries.dir)
+ WARN_ON(!simple_empty(attr->dentries.dir));
+
+ debugfs_remove(attr->dentries.dir);
+ attr->dentries.dir = NULL;
+}
+
+int init_fault_attr_dentries(struct fault_attr *attr, const char *name)
+{
+ mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+ struct dentry *dir;
+
+ memset(&attr->dentries, 0, sizeof(attr->dentries));
+
+ dir = debugfs_create_dir(name, NULL);
+ if (!dir)
+ goto fail;
+ attr->dentries.dir = dir;
+
+ attr->dentries.probability_file =
+ debugfs_create_ul("probability", mode, dir, &attr->probability);
+
+ attr->dentries.interval_file =
+ debugfs_create_ul("interval", mode, dir, &attr->interval);
+
+ attr->dentries.times_file =
+ debugfs_create_atomic_t("times", mode, dir, &attr->times);
+
+ attr->dentries.space_file =
+ debugfs_create_atomic_t("space", mode, dir, &attr->space);
+
+ attr->dentries.verbose_file =
+ debugfs_create_ul("verbose", mode, dir, &attr->verbose);
+
+ attr->dentries.task_filter_file = debugfs_create_bool("task-filter",
+ mode, dir, &attr->task_filter);
+
+ attr->dentries.stacktrace_depth_file =
+ debugfs_create_ul_MAX_STACK_TRACE_DEPTH(
+ "stacktrace-depth", mode, dir, &attr->stacktrace_depth);
+
+ attr->dentries.require_start_file =
+ debugfs_create_ul("require-start", mode, dir, &attr->require_start);
+
+ attr->dentries.require_end_file =
+ debugfs_create_ul("require-end", mode, dir, &attr->require_end);
+
+ attr->dentries.reject_start_file =
+ debugfs_create_ul("reject-start", mode, dir, &attr->reject_start);
+
+ attr->dentries.reject_end_file =
+ debugfs_create_ul("reject-end", mode, dir, &attr->reject_end);
+
+
+ if (!attr->dentries.probability_file || !attr->dentries.interval_file
+ || !attr->dentries.times_file || !attr->dentries.space_file
+ || !attr->dentries.verbose_file || !attr->dentries.task_filter_file
+ || !attr->dentries.stacktrace_depth_file
+ || !attr->dentries.require_start_file
+ || !attr->dentries.require_end_file
+ || !attr->dentries.reject_start_file
+ || !attr->dentries.reject_end_file
+ )
+ goto fail;
+
+ return 0;
+fail:
+ cleanup_fault_attr_dentries(attr);
+ return -ENOMEM;
+}
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
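
For context, a hedged sketch of how a subsystem consumes this framework, modeled on what the FAILSLAB/FAIL_PAGE_ALLOC options hook up. The "fail_foo_alloc" names are made up, and FAULT_ATTR_INITIALIZER is assumed to come from <linux/fault-inject.h>.

#include <linux/fault-inject.h>
#include <linux/init.h>
#include <linux/slab.h>

/* Hedged sketch of a client of the fault-injection framework above. */
static struct fault_attr fail_foo_alloc = FAULT_ATTR_INITIALIZER;

static int __init setup_fail_foo_alloc(char *str)
{
        return setup_fault_attr(&fail_foo_alloc, str);
}
__setup("fail_foo_alloc=", setup_fail_foo_alloc);

void *foo_alloc(size_t size)
{
        if (should_fail(&fail_foo_alloc, size))
                return NULL;                    /* injected failure */
        return kmalloc(size, GFP_KERNEL);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
static int __init fail_foo_alloc_debugfs(void)
{
        return init_fault_attr_dentries(&fail_foo_alloc, "fail_foo_alloc");
}
late_initcall(fail_foo_alloc_debugfs);
#endif
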
diff --git a/lib/iomap.c b/lib/iomap.c
index d6ccdd85df5..4990c736bc4 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -4,8 +4,10 @@
* (C) Copyright 2004 Linus Torvalds
*/
#include <linux/pci.h>
+#include <linux/io.h>
+
+#ifdef CONFIG_GENERIC_IOMAP
#include <linux/module.h>
-#include <asm/io.h>
/*
* Read/write from/to an (offsettable) iomem cookie. It might be a PIO
@@ -254,3 +256,298 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
}
EXPORT_SYMBOL(pci_iomap);
EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* CONFIG_GENERIC_IOMAP */
+
+/*
+ * Generic iomap devres
+ */
+static void devm_ioport_map_release(struct device *dev, void *res)
+{
+ ioport_unmap(*(void __iomem **)res);
+}
+
+static int devm_ioport_map_match(struct device *dev, void *res,
+ void *match_data)
+{
+ return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioport_map - Managed ioport_map()
+ * @dev: Generic device to map ioport for
+ * @port: Port to map
+ * @nr: Number of ports to map
+ *
+ * Managed ioport_map(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
+ unsigned int nr)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = ioport_map(port, nr);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioport_map);
+
+/**
+ * devm_ioport_unmap - Managed ioport_unmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed ioport_unmap(). @addr must have been mapped using
+ * devm_ioport_map().
+ */
+void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+{
+ ioport_unmap(addr);
+ WARN_ON(devres_destroy(dev, devm_ioport_map_release,
+ devm_ioport_map_match, (void *)addr));
+}
+EXPORT_SYMBOL(devm_ioport_unmap);
+
+static void devm_ioremap_release(struct device *dev, void *res)
+{
+ iounmap(*(void __iomem **)res);
+}
+
+static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
+{
+ return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioremap - Managed ioremap()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
+ unsigned long size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = ioremap(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioremap);
+
+/**
+ * devm_ioremap_nocache - Managed ioremap_nocache()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap_nocache(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
+ unsigned long size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = ioremap_nocache(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioremap_nocache);
+
+/**
+ * devm_iounmap - Managed iounmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
+ */
+void devm_iounmap(struct device *dev, void __iomem *addr)
+{
+ iounmap(addr);
+ WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
+ (void *)addr));
+}
+EXPORT_SYMBOL(devm_iounmap);
+
+/*
+ * PCI iomap devres
+ */
+#define PCIM_IOMAP_MAX PCI_ROM_RESOURCE
+
+struct pcim_iomap_devres {
+ void __iomem *table[PCIM_IOMAP_MAX];
+};
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+ struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
+ struct pcim_iomap_devres *this = res;
+ int i;
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (this->table[i])
+ pci_iounmap(dev, this->table[i]);
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table
+ * @pdev: PCI device to access iomap table for
+ *
+ * Access iomap allocation table for @pdev. If iomap table doesn't
+ * exist and @pdev is managed, it will be allocated. All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated but can
+ * be safely called without context and guaranteed to succeed once
+ * allocated.
+ */
+void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
+{
+ struct pcim_iomap_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+ if (dr)
+ return dr->table;
+
+ new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
+ if (!new_dr)
+ return NULL;
+ dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+ return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/**
+ * pcim_iomap - Managed pci_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Managed pci_iomap(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+ void __iomem **tbl;
+
+ BUG_ON(bar >= PCIM_IOMAP_MAX);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
+ return NULL;
+
+ tbl[bar] = pci_iomap(pdev, bar, maxlen);
+ return tbl[bar];
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ void __iomem **tbl;
+ int i;
+
+ pci_iounmap(pdev, addr);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ BUG_ON(!tbl);
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (tbl[i] == addr) {
+ tbl[i] = NULL;
+ return;
+ }
+ WARN_ON(1);
+}
+EXPORT_SYMBOL(pcim_iounmap);
+
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to request and iomap
+ * @name: Name used when requesting regions
+ *
+ * Request and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
+{
+ void __iomem * const *iomap;
+ int i, rc;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ unsigned long len;
+
+ if (!(mask & (1 << i)))
+ continue;
+
+ rc = -EINVAL;
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ goto err_inval;
+
+ rc = pci_request_region(pdev, i, name);
+ if (rc)
+ goto err_region;
+
+ rc = -ENOMEM;
+ if (!pcim_iomap(pdev, i, 0))
+ goto err_iomap;
+ }
+
+ return 0;
+
+ err_iomap:
+ pcim_iounmap(pdev, iomap[i]);
+ err_region:
+ pci_release_region(pdev, i);
+ err_inval:
+ while (--i >= 0) {
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
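
A hedged sketch of the intended consumer of the new pcim_* helpers, a PCI driver probe routine. The "foo" driver, the BAR mask and the register offset are illustrative only; note pci_enable_device() itself is not managed by this code.

#include <linux/pci.h>
#include <linux/io.h>

/* Hedged sketch: probe routine using the managed iomap helpers above. */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        void __iomem * const *iomap;
        int rc;

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;

        /* request + iomap BAR 0 and BAR 2; both are torn down on detach */
        rc = pcim_iomap_regions(pdev, (1 << 0) | (1 << 2), "foo");
        if (rc)
                return rc;

        iomap = pcim_iomap_table(pdev);
        writel(0x1, iomap[0] + 0x10);           /* illustrative register poke */
        return 0;
}
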
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 99fa277f9f7..a9e4415b02d 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -5,7 +5,6 @@
*
* (C) Copyright 1995 1996 Linus Torvalds
*/
-#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
diff --git a/lib/kobject.c b/lib/kobject.c
index 7ce6dc138e9..c2917ffe8bf 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -44,11 +44,11 @@ static int populate_dir(struct kobject * kobj)
return error;
}
-static int create_dir(struct kobject * kobj)
+static int create_dir(struct kobject * kobj, struct dentry *shadow_parent)
{
int error = 0;
if (kobject_name(kobj)) {
- error = sysfs_create_dir(kobj);
+ error = sysfs_create_dir(kobj, shadow_parent);
if (!error) {
if ((error = populate_dir(kobj)))
sysfs_remove_dir(kobj);
@@ -126,6 +126,8 @@ EXPORT_SYMBOL_GPL(kobject_get_path);
*/
void kobject_init(struct kobject * kobj)
{
+ if (!kobj)
+ return;
kref_init(&kobj->kref);
INIT_LIST_HEAD(&kobj->entry);
init_waitqueue_head(&kobj->poll);
@@ -156,9 +158,10 @@ static void unlink(struct kobject * kobj)
/**
* kobject_add - add an object to the hierarchy.
* @kobj: object.
+ * @shadow_parent: sysfs directory to add to.
*/
-int kobject_add(struct kobject * kobj)
+int kobject_shadow_add(struct kobject * kobj, struct dentry *shadow_parent)
{
int error = 0;
struct kobject * parent;
@@ -189,12 +192,11 @@ int kobject_add(struct kobject * kobj)
}
kobj->parent = parent;
- error = create_dir(kobj);
+ error = create_dir(kobj, shadow_parent);
if (error) {
/* unlink does the kobject_put() for us */
unlink(kobj);
- if (parent)
- kobject_put(parent);
+ kobject_put(parent);
/* be noisy on error issues */
if (error == -EEXIST)
@@ -211,6 +213,15 @@ int kobject_add(struct kobject * kobj)
return error;
}
+/**
+ * kobject_add - add an object to the hierarchy.
+ * @kobj: object.
+ */
+int kobject_add(struct kobject * kobj)
+{
+ return kobject_shadow_add(kobj, NULL);
+}
+
/**
* kobject_register - initialize and add an object.
@@ -303,7 +314,29 @@ int kobject_rename(struct kobject * kobj, const char *new_name)
kobj = kobject_get(kobj);
if (!kobj)
return -EINVAL;
- error = sysfs_rename_dir(kobj, new_name);
+ if (!kobj->parent)
+ return -EINVAL;
+ error = sysfs_rename_dir(kobj, kobj->parent->dentry, new_name);
+ kobject_put(kobj);
+
+ return error;
+}
+
+/**
+ * kobject_shadow_rename - change the name of an object
+ * @kobj: object in question.
+ * @new_name: object's new name
+ */
+
+int kobject_shadow_rename(struct kobject * kobj, struct dentry *new_parent,
+ const char *new_name)
+{
+ int error = 0;
+
+ kobj = kobject_get(kobj);
+ if (!kobj)
+ return -EINVAL;
+ error = sysfs_rename_dir(kobj, new_parent, new_name);
kobject_put(kobj);
return error;
@@ -312,7 +345,7 @@ int kobject_rename(struct kobject * kobj, const char *new_name)
/**
* kobject_move - move object to another parent
* @kobj: object in question.
- * @new_parent: object's new parent
+ * @new_parent: object's new parent (can be NULL)
*/
int kobject_move(struct kobject *kobj, struct kobject *new_parent)
@@ -328,8 +361,8 @@ int kobject_move(struct kobject *kobj, struct kobject *new_parent)
return -EINVAL;
new_parent = kobject_get(new_parent);
if (!new_parent) {
- error = -EINVAL;
- goto out;
+ if (kobj->kset)
+ new_parent = kobject_get(&kobj->kset->kobj);
}
/* old object path */
devpath = kobject_get_path(kobj, GFP_KERNEL);
@@ -366,6 +399,8 @@ out:
void kobject_del(struct kobject * kobj)
{
+ if (!kobj)
+ return;
sysfs_remove_dir(kobj);
unlink(kobj);
}
@@ -377,6 +412,8 @@ void kobject_del(struct kobject * kobj)
void kobject_unregister(struct kobject * kobj)
{
+ if (!kobj)
+ return;
pr_debug("kobject %s: unregistering\n",kobject_name(kobj));
kobject_uevent(kobj, KOBJ_REMOVE);
kobject_del(kobj);
@@ -414,8 +451,7 @@ void kobject_cleanup(struct kobject * kobj)
t->release(kobj);
if (s)
kset_put(s);
- if (parent)
- kobject_put(parent);
+ kobject_put(parent);
}
static void kobject_release(struct kref *kref)
@@ -523,6 +559,8 @@ int kset_add(struct kset * k)
int kset_register(struct kset * k)
{
+ if (!k)
+ return -EINVAL;
kset_init(k);
return kset_add(k);
}
@@ -535,6 +573,8 @@ int kset_register(struct kset * k)
void kset_unregister(struct kset * k)
{
+ if (!k)
+ return;
kobject_unregister(&k->kobj);
}
@@ -586,6 +626,9 @@ int subsystem_register(struct subsystem * s)
{
int error;
+ if (!s)
+ return -EINVAL;
+
subsystem_init(s);
pr_debug("subsystem %s: registering\n",s->kset.kobj.name);
@@ -598,6 +641,8 @@ int subsystem_register(struct subsystem * s)
void subsystem_unregister(struct subsystem * s)
{
+ if (!s)
+ return;
pr_debug("subsystem %s: unregistering\n",s->kset.kobj.name);
kset_unregister(&s->kset);
}
@@ -612,6 +657,10 @@ void subsystem_unregister(struct subsystem * s)
int subsys_create_file(struct subsystem * s, struct subsys_attribute * a)
{
int error = 0;
+
+ if (!s || !a)
+ return -EINVAL;
+
if (subsys_get(s)) {
error = sysfs_create_file(&s->kset.kobj,&a->attr);
subsys_put(s);
diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
index a1922765ff3..84272ed77f0 100644
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -63,8 +63,11 @@ static char *action_to_string(enum kobject_action action)
* @action: action that is happening (usually KOBJ_MOVE)
* @kobj: struct kobject that the action is happening to
* @envp_ext: pointer to environmental data
+ *
+ * Returns 0 if kobject_uevent() is completed with success or the
+ * corresponding error when it fails.
*/
-void kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
+int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
char *envp_ext[])
{
char **envp;
@@ -79,14 +82,16 @@ void kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
u64 seq;
char *seq_buff;
int i = 0;
- int retval;
+ int retval = 0;
int j;
pr_debug("%s\n", __FUNCTION__);
action_string = action_to_string(action);
- if (!action_string)
- return;
+ if (!action_string) {
+ pr_debug("kobject attempted to send uevent without action_string!\n");
+ return -EINVAL;
+ }
/* search the kset we belong to */
top_kobj = kobj;
@@ -95,31 +100,39 @@ void kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
top_kobj = top_kobj->parent;
} while (!top_kobj->kset && top_kobj->parent);
}
- if (!top_kobj->kset)
- return;
+ if (!top_kobj->kset) {
+ pr_debug("kobject attempted to send uevent without kset!\n");
+ return -EINVAL;
+ }
kset = top_kobj->kset;
uevent_ops = kset->uevent_ops;
/* skip the event, if the filter returns zero. */
if (uevent_ops && uevent_ops->filter)
- if (!uevent_ops->filter(kset, kobj))
- return;
+ if (!uevent_ops->filter(kset, kobj)) {
+ pr_debug("kobject filter function caused the event to drop!\n");
+ return 0;
+ }
/* environment index */
envp = kzalloc(NUM_ENVP * sizeof (char *), GFP_KERNEL);
if (!envp)
- return;
+ return -ENOMEM;
/* environment values */
buffer = kmalloc(BUFFER_SIZE, GFP_KERNEL);
- if (!buffer)
+ if (!buffer) {
+ retval = -ENOMEM;
goto exit;
+ }
/* complete object path */
devpath = kobject_get_path(kobj, GFP_KERNEL);
- if (!devpath)
+ if (!devpath) {
+ retval = -ENOENT;
goto exit;
+ }
/* originating subsystem */
if (uevent_ops && uevent_ops->name)
@@ -204,7 +217,7 @@ exit:
kfree(devpath);
kfree(buffer);
kfree(envp);
- return;
+ return retval;
}
EXPORT_SYMBOL_GPL(kobject_uevent_env);
@@ -214,10 +227,13 @@ EXPORT_SYMBOL_GPL(kobject_uevent_env);
*
* @action: action that is happening (usually KOBJ_ADD and KOBJ_REMOVE)
* @kobj: struct kobject that the action is happening to
+ *
+ * Returns 0 if kobject_uevent() is completed with success or the
+ * corresponding error when it fails.
*/
-void kobject_uevent(struct kobject *kobj, enum kobject_action action)
+int kobject_uevent(struct kobject *kobj, enum kobject_action action)
{
- kobject_uevent_env(kobj, action, NULL);
+ return kobject_uevent_env(kobj, action, NULL);
}
EXPORT_SYMBOL_GPL(kobject_uevent);
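
With kobject_uevent() now returning an int, callers can propagate failures instead of losing them. A brief hedged sketch; the "foo" names are illustrative.

#include <linux/kobject.h>
#include <linux/kernel.h>

/* Hedged sketch: propagating the new kobject_uevent() return value. */
static int foo_announce(struct kobject *kobj)
{
        int error;

        error = kobject_uevent(kobj, KOBJ_ADD);
        if (error)
                printk(KERN_WARNING "foo: KOBJ_ADD uevent failed: %d\n", error);
        return error;
}
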
diff --git a/lib/kref.c b/lib/kref.c
index 4a467faf136..0d07cc31c81 100644
--- a/lib/kref.c
+++ b/lib/kref.c
@@ -52,12 +52,7 @@ int kref_put(struct kref *kref, void (*release)(struct kref *kref))
WARN_ON(release == NULL);
WARN_ON(release == (void (*)(struct kref *))kfree);
- /*
- * if current count is one, we are the last user and can release object
- * right now, avoiding an atomic operation on 'refcount'
- */
- if ((atomic_read(&kref->refcount) == 1) ||
- (atomic_dec_and_test(&kref->refcount))) {
+ if (atomic_dec_and_test(&kref->refcount)) {
release(kref);
return 1;
}
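
The removed branch skipped the atomic decrement when the count already read 1; kref_put() now always goes through atomic_dec_and_test(). For reference, a hedged sketch of the usage pattern this serves; struct foo is illustrative.

#include <linux/kref.h>
#include <linux/slab.h>

/* Hedged sketch: the standard kref pattern served by kref_put() above. */
struct foo {
        struct kref kref;
        /* ... payload ... */
};

static void foo_release(struct kref *kref)
{
        struct foo *foo = container_of(kref, struct foo, kref);

        kfree(foo);
}

/* each successful kref_get() is balanced by exactly one kref_put() */
static void foo_put(struct foo *foo)
{
        kref_put(&foo->kref, foo_release);
}
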
diff --git a/lib/reciprocal_div.c b/lib/reciprocal_div.c
new file mode 100644
index 00000000000..6a3bd48fa2a
--- /dev/null
+++ b/lib/reciprocal_div.c
@@ -0,0 +1,9 @@
+#include <asm/div64.h>
+#include <linux/reciprocal_div.h>
+
+u32 reciprocal_value(u32 k)
+{
+ u64 val = (1LL << 32) + (k - 1);
+ do_div(val, k);
+ return (u32)val;
+}
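
reciprocal_value() computes ceil(2^32 / k); the matching divide helper, presumably provided by <linux/reciprocal_div.h>, then replaces a runtime division by the constant k with a multiply and a shift. A hedged sketch of that assumed counterpart, plus a worked example:

/* Hedged sketch of the assumed counterpart in <linux/reciprocal_div.h>. */
static inline u32 reciprocal_divide(u32 a, u32 r)
{
        return (u32)(((u64)a * r) >> 32);
}

/*
 * Worked example for k = 10:
 *   r = reciprocal_value(10) = ceil(2^32 / 10) = 0x1999999a
 *   reciprocal_divide(137, r) = (137 * 0x1999999a) >> 32 = 13  (== 137 / 10)
 */
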
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 10625785eef..50a43801018 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -1,7 +1,7 @@
/*
* Dynamic DMA mapping support.
*
- * This implementation is for IA-64 and EM64T platforms that do not support
+ * This implementation is a fallback for platforms that do not support
* I/O TLBs (aka DMA address translation hardware).
* Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
* Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
@@ -28,6 +28,7 @@
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/scatterlist.h>
+#include <asm/swiotlb.h>
#include <linux/init.h>
#include <linux/bootmem.h>
@@ -35,8 +36,10 @@
#define OFFSET(val,align) ((unsigned long) \
( (val) & ( (align) - 1)))
+#ifndef SG_ENT_VIRT_ADDRESS
#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
-#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG))
+#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
+#endif
/*
* Maximum allowable number of contiguous slabs to map,
@@ -101,13 +104,25 @@ static unsigned int io_tlb_index;
* We need to save away the original address corresponding to a mapped entry
* for the sync operations.
*/
-static unsigned char **io_tlb_orig_addr;
+#ifndef SWIOTLB_ARCH_HAS_IO_TLB_ADDR_T
+typedef char *io_tlb_addr_t;
+#define swiotlb_orig_addr_null(buffer) (!(buffer))
+#define ptr_to_io_tlb_addr(ptr) (ptr)
+#define page_to_io_tlb_addr(pg, off) (page_address(pg) + (off))
+#define sg_to_io_tlb_addr(sg) SG_ENT_VIRT_ADDRESS(sg)
+#endif
+static io_tlb_addr_t *io_tlb_orig_addr;
/*
* Protect the above data structures in the map and unmap calls
*/
static DEFINE_SPINLOCK(io_tlb_lock);
+#ifdef SWIOTLB_EXTRA_VARIABLES
+SWIOTLB_EXTRA_VARIABLES;
+#endif
+
+#ifndef SWIOTLB_ARCH_HAS_SETUP_IO_TLB_NPAGES
static int __init
setup_io_tlb_npages(char *str)
{
@@ -122,30 +137,50 @@ setup_io_tlb_npages(char *str)
swiotlb_force = 1;
return 1;
}
+#endif
__setup("swiotlb=", setup_io_tlb_npages);
/* make io_tlb_overflow tunable too? */
+#ifndef swiotlb_adjust_size
+#define swiotlb_adjust_size(size) ((void)0)
+#endif
+
+#ifndef swiotlb_adjust_seg
+#define swiotlb_adjust_seg(start, size) ((void)0)
+#endif
+
+#ifndef swiotlb_print_info
+#define swiotlb_print_info(bytes) \
+ printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " \
+ "0x%lx\n", bytes >> 20, \
+ virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end))
+#endif
+
/*
* Statically reserve bounce buffer space and initialize bounce buffer data
* structures for the software IO TLB used to implement the DMA API.
*/
-void
-swiotlb_init_with_default_size (size_t default_size)
+void __init
+swiotlb_init_with_default_size(size_t default_size)
{
- unsigned long i;
+ unsigned long i, bytes;
if (!io_tlb_nslabs) {
io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
}
+ swiotlb_adjust_size(io_tlb_nslabs);
+ swiotlb_adjust_size(io_tlb_overflow);
+
+ bytes = io_tlb_nslabs << IO_TLB_SHIFT;
/*
* Get IO TLB memory from the low pages
*/
- io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+ io_tlb_start = alloc_bootmem_low_pages(bytes);
if (!io_tlb_start)
panic("Cannot allocate SWIOTLB buffer");
- io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
+ io_tlb_end = io_tlb_start + bytes;
/*
* Allocate and initialize the free list array. This array is used
@@ -153,34 +188,45 @@ swiotlb_init_with_default_size (size_t default_size)
* between io_tlb_start and io_tlb_end.
*/
io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
- for (i = 0; i < io_tlb_nslabs; i++)
+ for (i = 0; i < io_tlb_nslabs; i++) {
+ if (!(i % IO_TLB_SEGSIZE))
+ swiotlb_adjust_seg(io_tlb_start + (i << IO_TLB_SHIFT),
+ IO_TLB_SEGSIZE << IO_TLB_SHIFT);
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+ }
io_tlb_index = 0;
- io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *));
+ io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(io_tlb_addr_t));
/*
* Get the overflow emergency buffer
*/
io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
- printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n",
- virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+ if (!io_tlb_overflow_buffer)
+ panic("Cannot allocate SWIOTLB overflow buffer!\n");
+ swiotlb_adjust_seg(io_tlb_overflow_buffer, io_tlb_overflow);
+
+ swiotlb_print_info(bytes);
}
+#ifndef __swiotlb_init_with_default_size
+#define __swiotlb_init_with_default_size swiotlb_init_with_default_size
+#endif
-void
-swiotlb_init (void)
+void __init
+swiotlb_init(void)
{
- swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
+ __swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
}
+#ifdef SWIOTLB_ARCH_NEED_LATE_INIT
/*
* Systems with larger DMA zones (those that don't support ISA) can
* initialize the swiotlb later using the slab allocator if needed.
* This should be just like above, but with some error catching.
*/
int
-swiotlb_late_init_with_default_size (size_t default_size)
+swiotlb_late_init_with_default_size(size_t default_size)
{
- unsigned long i, req_nslabs = io_tlb_nslabs;
+ unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
unsigned int order;
if (!io_tlb_nslabs) {
@@ -191,8 +237,9 @@ swiotlb_late_init_with_default_size (size_t default_size)
/*
* Get IO TLB memory from the low pages
*/
- order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+ order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
io_tlb_nslabs = SLABS_PER_PAGE << order;
+ bytes = io_tlb_nslabs << IO_TLB_SHIFT;
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
@@ -205,13 +252,14 @@ swiotlb_late_init_with_default_size (size_t default_size)
if (!io_tlb_start)
goto cleanup1;
- if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) {
+ if (order != get_order(bytes)) {
printk(KERN_WARNING "Warning: only able to allocate %ld MB "
"for software IO TLB\n", (PAGE_SIZE << order) >> 20);
io_tlb_nslabs = SLABS_PER_PAGE << order;
+ bytes = io_tlb_nslabs << IO_TLB_SHIFT;
}
- io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
- memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT));
+ io_tlb_end = io_tlb_start + bytes;
+ memset(io_tlb_start, 0, bytes);
/*
* Allocate and initialize the free list array. This array is used
@@ -227,12 +275,12 @@ swiotlb_late_init_with_default_size (size_t default_size)
io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
io_tlb_index = 0;
- io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL,
- get_order(io_tlb_nslabs * sizeof(char *)));
+ io_tlb_orig_addr = (io_tlb_addr_t *)__get_free_pages(GFP_KERNEL,
+ get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
if (!io_tlb_orig_addr)
goto cleanup3;
- memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *));
+ memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(io_tlb_addr_t));
/*
* Get the overflow emergency buffer
@@ -242,29 +290,29 @@ swiotlb_late_init_with_default_size (size_t default_size)
if (!io_tlb_overflow_buffer)
goto cleanup4;
- printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - "
- "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
- virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
+ swiotlb_print_info(bytes);
return 0;
cleanup4:
- free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs *
- sizeof(char *)));
+ free_pages((unsigned long)io_tlb_orig_addr,
+ get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
io_tlb_orig_addr = NULL;
cleanup3:
- free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
- sizeof(int)));
+ free_pages((unsigned long)io_tlb_list,
+ get_order(io_tlb_nslabs * sizeof(int)));
io_tlb_list = NULL;
- io_tlb_end = NULL;
cleanup2:
+ io_tlb_end = NULL;
free_pages((unsigned long)io_tlb_start, order);
io_tlb_start = NULL;
cleanup1:
io_tlb_nslabs = req_nslabs;
return -ENOMEM;
}
+#endif
+#ifndef SWIOTLB_ARCH_HAS_NEEDS_MAPPING
static inline int
address_needs_mapping(struct device *hwdev, dma_addr_t addr)
{
@@ -275,11 +323,35 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
return (addr & ~mask) != 0;
}
+static inline int range_needs_mapping(const void *ptr, size_t size)
+{
+ return swiotlb_force;
+}
+
+static inline int order_needs_mapping(unsigned int order)
+{
+ return 0;
+}
+#endif
+
+static void
+__sync_single(io_tlb_addr_t buffer, char *dma_addr, size_t size, int dir)
+{
+#ifndef SWIOTLB_ARCH_HAS_SYNC_SINGLE
+ if (dir == DMA_TO_DEVICE)
+ memcpy(dma_addr, buffer, size);
+ else
+ memcpy(buffer, dma_addr, size);
+#else
+ __swiotlb_arch_sync_single(buffer, dma_addr, size, dir);
+#endif
+}
+
/*
* Allocates bounce buffer and returns its kernel virtual address.
*/
static void *
-map_single(struct device *hwdev, char *buffer, size_t size, int dir)
+map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
{
unsigned long flags;
char *dma_addr;
@@ -352,7 +424,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
*/
io_tlb_orig_addr[index] = buffer;
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
- memcpy(dma_addr, buffer, size);
+ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
return dma_addr;
}
@@ -366,17 +438,18 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
unsigned long flags;
int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- char *buffer = io_tlb_orig_addr[index];
+ io_tlb_addr_t buffer = io_tlb_orig_addr[index];
/*
* First, sync the memory before unmapping the entry
*/
- if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+ if (!swiotlb_orig_addr_null(buffer)
+ && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
/*
* bounce... copy the data back into the original buffer * and
* delete the bounce buffer.
*/
- memcpy(buffer, dma_addr, size);
+ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
/*
* Return the buffer to the free list by setting the corresponding
@@ -409,18 +482,18 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
int dir, int target)
{
int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
- char *buffer = io_tlb_orig_addr[index];
+ io_tlb_addr_t buffer = io_tlb_orig_addr[index];
switch (target) {
case SYNC_FOR_CPU:
if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
- memcpy(buffer, dma_addr, size);
+ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
else
BUG_ON(dir != DMA_TO_DEVICE);
break;
case SYNC_FOR_DEVICE:
if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
- memcpy(dma_addr, buffer, size);
+ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
else
BUG_ON(dir != DMA_FROM_DEVICE);
break;
@@ -429,11 +502,13 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
}
}
+#ifdef SWIOTLB_ARCH_NEED_ALLOC
+
void *
swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags)
{
- unsigned long dev_addr;
+ dma_addr_t dev_addr;
void *ret;
int order = get_order(size);
@@ -444,8 +519,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
*/
flags |= GFP_DMA;
- ret = (void *)__get_free_pages(flags, order);
- if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) {
+ if (!order_needs_mapping(order))
+ ret = (void *)__get_free_pages(flags, order);
+ else
+ ret = NULL;
+ if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
/*
* The allocated memory isn't reachable by the device.
* Fall back on swiotlb_map_single().
@@ -465,22 +543,24 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
if (swiotlb_dma_mapping_error(handle))
return NULL;
- ret = phys_to_virt(handle);
+ ret = bus_to_virt(handle);
}
memset(ret, 0, size);
- dev_addr = virt_to_phys(ret);
+ dev_addr = virt_to_bus(ret);
/* Confirm address can be DMA'd by device */
if (address_needs_mapping(hwdev, dev_addr)) {
- printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n",
- (unsigned long long)*hwdev->dma_mask, dev_addr);
+ printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
+ (unsigned long long)*hwdev->dma_mask,
+ (unsigned long long)dev_addr);
panic("swiotlb_alloc_coherent: allocated memory is out of "
"range for device");
}
*dma_handle = dev_addr;
return ret;
}
+EXPORT_SYMBOL(swiotlb_alloc_coherent);
void
swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -493,6 +573,9 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
/* DMA_TO_DEVICE to avoid memcpy in unmap_single */
swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
}
+EXPORT_SYMBOL(swiotlb_free_coherent);
+
+#endif
static void
swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -504,7 +587,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
* When the mapping is small enough return a static buffer to limit
* the damage, or panic when the transfer is too big.
*/
- printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
+ printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
"device %s\n", size, dev ? dev->bus_id : "?");
if (size > io_tlb_overflow && do_panic) {
@@ -525,7 +608,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
dma_addr_t
swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
{
- unsigned long dev_addr = virt_to_phys(ptr);
+ dma_addr_t dev_addr = virt_to_bus(ptr);
void *map;
BUG_ON(dir == DMA_NONE);
@@ -534,19 +617,20 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
* we can safely return the device addr and not worry about bounce
* buffering it.
*/
- if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force)
+ if (!range_needs_mapping(ptr, size)
+ && !address_needs_mapping(hwdev, dev_addr))
return dev_addr;
/*
* Oh well, have to allocate and map a bounce buffer.
*/
- map = map_single(hwdev, ptr, size, dir);
+ map = map_single(hwdev, ptr_to_io_tlb_addr(ptr), size, dir);
if (!map) {
swiotlb_full(hwdev, size, dir, 1);
map = io_tlb_overflow_buffer;
}
- dev_addr = virt_to_phys(map);
+ dev_addr = virt_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
@@ -558,25 +642,6 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
}
/*
- * Since DMA is i-cache coherent, any (complete) pages that were written via
- * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
- * flush them when they get mapped into an executable vm-area.
- */
-static void
-mark_clean(void *addr, size_t size)
-{
- unsigned long pg_addr, end;
-
- pg_addr = PAGE_ALIGN((unsigned long) addr);
- end = (unsigned long) addr + size;
- while (pg_addr + PAGE_SIZE <= end) {
- struct page *page = virt_to_page(pg_addr);
- set_bit(PG_arch_1, &page->flags);
- pg_addr += PAGE_SIZE;
- }
-}
-
-/*
* Unmap a single streaming mode DMA translation. The dma_addr and size must
* match what was provided for in a previous swiotlb_map_single call. All
* other usages are undefined.
@@ -588,13 +653,13 @@ void
swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
int dir)
{
- char *dma_addr = phys_to_virt(dev_addr);
+ char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
unmap_single(hwdev, dma_addr, size, dir);
else if (dir == DMA_FROM_DEVICE)
- mark_clean(dma_addr, size);
+ dma_mark_clean(dma_addr, size);
}
/*
@@ -611,13 +676,13 @@ static inline void
swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, int dir, int target)
{
- char *dma_addr = phys_to_virt(dev_addr);
+ char *dma_addr = bus_to_virt(dev_addr);
BUG_ON(dir == DMA_NONE);
if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
- mark_clean(dma_addr, size);
+ dma_mark_clean(dma_addr, size);
}
void
@@ -642,13 +707,13 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
unsigned long offset, size_t size,
int dir, int target)
{
- char *dma_addr = phys_to_virt(dev_addr) + offset;
+ char *dma_addr = bus_to_virt(dev_addr) + offset;
BUG_ON(dir == DMA_NONE);
if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
sync_single(hwdev, dma_addr, size, dir, target);
else if (dir == DMA_FROM_DEVICE)
- mark_clean(dma_addr, size);
+ dma_mark_clean(dma_addr, size);
}
void
@@ -687,18 +752,16 @@ int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
int dir)
{
- void *addr;
- unsigned long dev_addr;
+ dma_addr_t dev_addr;
int i;
BUG_ON(dir == DMA_NONE);
for (i = 0; i < nelems; i++, sg++) {
- addr = SG_ENT_VIRT_ADDRESS(sg);
- dev_addr = virt_to_phys(addr);
- if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
- void *map = map_single(hwdev, addr, sg->length, dir);
- sg->dma_address = virt_to_bus(map);
+ dev_addr = SG_ENT_PHYS_ADDRESS(sg);
+ if (range_needs_mapping(SG_ENT_VIRT_ADDRESS(sg), sg->length)
+ || address_needs_mapping(hwdev, dev_addr)) {
+ void *map = map_single(hwdev, sg_to_io_tlb_addr(sg), sg->length, dir);
if (!map) {
/* Don't panic here, we expect map_sg users
to do proper error handling. */
@@ -707,6 +770,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
sg[0].dma_length = 0;
return 0;
}
+ sg->dma_address = virt_to_bus(map);
} else
sg->dma_address = dev_addr;
sg->dma_length = sg->length;
@@ -728,9 +792,10 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
for (i = 0; i < nelems; i++, sg++)
if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
- unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir);
+ unmap_single(hwdev, bus_to_virt(sg->dma_address),
+ sg->dma_length, dir);
else if (dir == DMA_FROM_DEVICE)
- mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
+ dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
}
/*
@@ -750,8 +815,10 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
for (i = 0; i < nelems; i++, sg++)
if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
- sync_single(hwdev, (void *) sg->dma_address,
+ sync_single(hwdev, bus_to_virt(sg->dma_address),
sg->dma_length, dir, target);
+ else if (dir == DMA_FROM_DEVICE)
+ dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
}
void
@@ -768,10 +835,48 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
+#ifdef SWIOTLB_ARCH_NEED_MAP_PAGE
+
+dma_addr_t
+swiotlb_map_page(struct device *hwdev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ dma_addr_t dev_addr;
+ char *map;
+
+ dev_addr = page_to_bus(page) + offset;
+ if (address_needs_mapping(hwdev, dev_addr)) {
+ map = map_single(hwdev, page_to_io_tlb_addr(page, offset), size, direction);
+ if (!map) {
+ swiotlb_full(hwdev, size, direction, 1);
+ map = io_tlb_overflow_buffer;
+ }
+ dev_addr = virt_to_bus(map);
+ }
+
+ return dev_addr;
+}
+
+void
+swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction direction)
+{
+ char *dma_addr = bus_to_virt(dev_addr);
+
+ BUG_ON(direction == DMA_NONE);
+ if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
+ unmap_single(hwdev, dma_addr, size, direction);
+ else if (direction == DMA_FROM_DEVICE)
+ dma_mark_clean(dma_addr, size);
+}
+
+#endif
+
int
swiotlb_dma_mapping_error(dma_addr_t dma_addr)
{
- return (dma_addr == virt_to_phys(io_tlb_overflow_buffer));
+ return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
}
/*
@@ -780,10 +885,13 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
* during bus mastering, then you would pass 0x00ffffff as the mask to
* this function.
*/
+#ifndef __swiotlb_dma_supported
+#define __swiotlb_dma_supported(hwdev, mask) (virt_to_bus(io_tlb_end - 1) <= (mask))
+#endif
int
-swiotlb_dma_supported (struct device *hwdev, u64 mask)
+swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
- return (virt_to_phys (io_tlb_end) - 1) <= mask;
+ return __swiotlb_dma_supported(hwdev, mask);
}
EXPORT_SYMBOL(swiotlb_init);
@@ -798,6 +906,4 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
EXPORT_SYMBOL(swiotlb_dma_mapping_error);
-EXPORT_SYMBOL(swiotlb_alloc_coherent);
-EXPORT_SYMBOL(swiotlb_free_coherent);
EXPORT_SYMBOL(swiotlb_dma_supported);
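
For orientation, a hedged sketch of the driver-visible path that lands in the bounce-buffer code above when a buffer sits outside the device's DMA mask. Drivers normally reach this through the generic DMA API rather than calling swiotlb_* directly; the "foo" names are illustrative, and the swiotlb_* prototypes are assumed to come from the arch's <asm/swiotlb.h>.

#include <linux/dma-mapping.h>

/* Hedged sketch: mapping, using and unmapping a possibly-bounced buffer. */
static int foo_start_dma(struct device *dev, void *buf, size_t len)
{
        dma_addr_t bus;

        /* may bounce: for DMA_TO_DEVICE, map_single() copies buf into the
         * IO TLB via __sync_single() and returns the bounce slot's address */
        bus = swiotlb_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (swiotlb_dma_mapping_error(bus))
                return -ENOMEM;

        /* ... program the device with 'bus' and wait for completion ... */

        /* DMA_TO_DEVICE: nothing is copied back; the bounce slot is freed */
        swiotlb_unmap_single(dev, bus, len, DMA_TO_DEVICE);
        return 0;
}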