-rw-r--r--  Documentation/DocBook/kernel-api.tmpl | 2
-rw-r--r--  MAINTAINERS | 7
-rw-r--r--  arch/m68k/kernel/entry.S | 4
-rw-r--r--  arch/m68knommu/defconfig | 112
-rw-r--r--  arch/m68knommu/kernel/syscalltable.S | 4
-rw-r--r--  arch/m68knommu/platform/68328/timers.c | 10
-rw-r--r--  arch/x86/Kconfig | 2
-rw-r--r--  arch/x86/kernel/i387.c | 8
-rw-r--r--  arch/x86/kvm/lapic.c | 4
-rw-r--r--  arch/x86/kvm/mmu.c | 38
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 20
-rw-r--r--  arch/x86/kvm/svm.c | 26
-rw-r--r--  arch/x86/kvm/vmx.c | 14
-rw-r--r--  arch/x86/kvm/x86.c | 114
-rw-r--r--  arch/x86/mm/numa_64.c | 8
-rw-r--r--  arch/x86/xen/setup.c | 3
-rw-r--r--  block/blk-barrier.c | 9
-rw-r--r--  block/blk-core.c | 7
-rw-r--r--  block/blk-map.c | 27
-rw-r--r--  block/blk-merge.c | 2
-rw-r--r--  block/blk-settings.c | 22
-rw-r--r--  block/blk-tag.c | 2
-rw-r--r--  block/blk.h | 2
-rw-r--r--  block/bsg.c | 8
-rw-r--r--  block/genhd.c | 10
-rw-r--r--  block/scsi_ioctl.c | 4
-rw-r--r--  drivers/ata/libata-scsi.c | 9
-rw-r--r--  drivers/block/cciss.c | 256
-rw-r--r--  drivers/block/cciss_scsi.c | 10
-rw-r--r--  drivers/cdrom/cdrom.c | 4
-rw-r--r--  drivers/dma/Kconfig | 19
-rw-r--r--  drivers/dma/Makefile | 1
-rw-r--r--  drivers/dma/fsldma.c | 1067
-rw-r--r--  drivers/dma/fsldma.h | 189
-rw-r--r--  drivers/dma/ioat_dma.c | 2
-rw-r--r--  drivers/net/fec.c | 8
-rw-r--r--  drivers/s390/char/defkeymap.c | 2
-rw-r--r--  fs/cifs/CHANGES | 4
-rw-r--r--  fs/cifs/README | 2
-rw-r--r--  fs/cifs/cifs_debug.c | 17
-rw-r--r--  fs/cifs/cifs_debug.h | 9
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 2
-rw-r--r--  fs/cifs/cifs_spnego.c | 2
-rw-r--r--  fs/cifs/cifs_unicode.c | 4
-rw-r--r--  fs/cifs/cifs_unicode.h | 9
-rw-r--r--  fs/cifs/cifsacl.c | 57
-rw-r--r--  fs/cifs/cifsfs.c | 15
-rw-r--r--  fs/cifs/cifsglob.h | 2
-rw-r--r--  fs/cifs/cifsproto.h | 22
-rw-r--r--  fs/cifs/cifssmb.c | 218
-rw-r--r--  fs/cifs/connect.c | 50
-rw-r--r--  fs/cifs/dir.c | 20
-rw-r--r--  fs/cifs/dns_resolve.h | 2
-rw-r--r--  fs/cifs/fcntl.c | 6
-rw-r--r--  fs/cifs/file.c | 33
-rw-r--r--  fs/cifs/inode.c | 399
-rw-r--r--  fs/cifs/ioctl.c | 2
-rw-r--r--  fs/cifs/md4.c | 4
-rw-r--r--  fs/cifs/md5.c | 9
-rw-r--r--  fs/cifs/misc.c | 14
-rw-r--r--  fs/cifs/netmisc.c | 23
-rw-r--r--  fs/cifs/readdir.c | 54
-rw-r--r--  fs/cifs/sess.c | 4
-rw-r--r--  fs/cifs/smbdes.c | 22
-rw-r--r--  fs/cifs/transport.c | 11
-rw-r--r--  fs/cifs/xattr.c | 9
-rw-r--r--  fs/proc/proc_misc.c | 3
-rw-r--r--  fs/splice.c | 12
-rw-r--r--  include/asm-m68k/unistd.h | 6
-rw-r--r--  include/asm-m68knommu/machdep.h | 2
-rw-r--r--  include/asm-m68knommu/unistd.h | 6
-rw-r--r--  include/linux/blkdev.h | 4
-rw-r--r--  include/linux/dmaengine.h | 2
-rw-r--r--  include/linux/genhd.h | 5
-rw-r--r--  include/linux/kvm.h | 4
-rw-r--r--  include/linux/kvm_host.h | 1
-rw-r--r--  include/linux/sched.h | 4
-rw-r--r--  kernel/power/process.c | 29
-rw-r--r--  kernel/sched.c | 283
-rw-r--r--  kernel/sched_fair.c | 115
-rw-r--r--  kernel/sched_rt.c | 4
-rw-r--r--  kernel/signal.c | 16
-rw-r--r--  kernel/sysctl.c | 18
-rw-r--r--  virt/kvm/ioapic.c | 8
-rw-r--r--  virt/kvm/kvm_main.c | 5
85 files changed, 2281 insertions, 1307 deletions
diff --git a/Documentation/DocBook/kernel-api.tmpl b/Documentation/DocBook/kernel-api.tmpl
index f31601e8bd8..dc0f30c3e57 100644
--- a/Documentation/DocBook/kernel-api.tmpl
+++ b/Documentation/DocBook/kernel-api.tmpl
@@ -361,12 +361,14 @@ X!Edrivers/pnp/system.c
<chapter id="blkdev">
<title>Block Devices</title>
!Eblock/blk-core.c
+!Iblock/blk-core.c
!Eblock/blk-map.c
!Iblock/blk-sysfs.c
!Eblock/blk-settings.c
!Eblock/blk-exec.c
!Eblock/blk-barrier.c
!Eblock/blk-tag.c
+!Iblock/blk-tag.c
</chapter>
<chapter id="chrdev">
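[Note on the directives above: in DocBook templates, !E<file> pulls in kernel-doc for the symbols a file exports, while the newly added !I<file> lines pull in kernel-doc for its internal, non-exported symbols, so static helpers in blk-core.c and blk-tag.c now land in the book as well. A minimal sketch of a comment either directive would collect; the helper name is illustrative:

    /**
     * my_helper - one-line summary rendered into the DocBook chapter
     * @arg: description of the argument
     *
     * Picked up by !I if my_helper() stays static, by !E if exported.
     */
    static int my_helper(int arg);
]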
diff --git a/MAINTAINERS b/MAINTAINERS
index fed09b54733..a0f78e76432 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1589,6 +1589,13 @@ L: linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
W: http://linux-fbdev.sourceforge.net/
S: Maintained
+FREESCALE DMA DRIVER
+P:	Zhang Wei
+M: wei.zhang@freescale.com
+L: linuxppc-embedded@ozlabs.org
+L: linux-kernel@vger.kernel.org
+S: Maintained
+
FREESCALE SOC FS_ENET DRIVER
P: Pantelis Antoniou
M: pantelis.antoniou@gmail.com
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
index 6dfa3b3c0e2..18a9c5f4b00 100644
--- a/arch/m68k/kernel/entry.S
+++ b/arch/m68k/kernel/entry.S
@@ -742,7 +742,9 @@ sys_call_table:
.long sys_epoll_pwait /* 315 */
.long sys_utimensat
.long sys_signalfd
- .long sys_ni_syscall
+ .long sys_timerfd_create
.long sys_eventfd
.long sys_fallocate /* 320 */
+ .long sys_timerfd_settime
+ .long sys_timerfd_gettime
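[The three new entries wire the timerfd family into the m68k syscall table; the nommu table below gets the same treatment. A minimal userspace sketch of the API being exposed, assuming a libc that provides the wrappers:

    #include <sys/timerfd.h>
    #include <stdint.h>
    #include <unistd.h>

    int wait_one_second(void)
    {
            struct itimerspec its = { .it_value = { .tv_sec = 1 } };
            uint64_t expirations;
            int fd = timerfd_create(CLOCK_MONOTONIC, 0);

            if (fd < 0)
                    return -1;
            if (timerfd_settime(fd, 0, &its, NULL) < 0) {
                    close(fd);
                    return -1;
            }
            /* read() blocks until the timer fires, then reports the count */
            read(fd, &expirations, sizeof(expirations));
            close(fd);
            return 0;
    }
]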
diff --git a/arch/m68knommu/defconfig b/arch/m68knommu/defconfig
index 648113075f9..670b0a99cfa 100644
--- a/arch/m68knommu/defconfig
+++ b/arch/m68knommu/defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.23
-# Thu Oct 18 13:17:38 2007
+# Linux kernel version: 2.6.25-rc3
+# Mon Feb 25 15:03:00 2008
#
CONFIG_M68K=y
# CONFIG_MMU is not set
@@ -15,8 +15,10 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_TIME=y
CONFIG_TIME_LOW_RES=y
CONFIG_NO_IOPORT=y
+CONFIG_ARCH_SUPPORTS_AOUT=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
#
@@ -31,12 +33,14 @@ CONFIG_LOCALVERSION_AUTO=y
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_TASKSTATS is not set
-# CONFIG_USER_NS is not set
# CONFIG_AUDIT is not set
# CONFIG_IKCONFIG is not set
CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+# CONFIG_GROUP_SCHED is not set
# CONFIG_SYSFS_DEPRECATED is not set
# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
# CONFIG_BLK_DEV_INITRD is not set
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_SYSCTL=y
@@ -48,15 +52,22 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_PRINTK=y
CONFIG_BUG=y
CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
CONFIG_BASE_FULL=y
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
# CONFIG_EVENTFD is not set
# CONFIG_VM_EVENT_COUNTERS is not set
CONFIG_SLAB=y
# CONFIG_SLUB is not set
# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+# CONFIG_HAVE_OPROFILE is not set
+# CONFIG_HAVE_KPROBES is not set
+CONFIG_SLABINFO=y
CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
CONFIG_MODULES=y
@@ -83,6 +94,8 @@ CONFIG_IOSCHED_NOOP=y
# CONFIG_DEFAULT_CFQ is not set
CONFIG_DEFAULT_NOOP=y
CONFIG_DEFAULT_IOSCHED="noop"
+CONFIG_CLASSIC_RCU=y
+# CONFIG_PREEMPT_RCU is not set
#
# Processor type and features
@@ -121,6 +134,7 @@ CONFIG_M5272C3=y
# CONFIG_MOD5272 is not set
CONFIG_FREESCALE=y
CONFIG_4KSTACKS=y
+CONFIG_HZ=100
#
# RAM configuration
@@ -147,6 +161,7 @@ CONFIG_FLATMEM_MANUAL=y
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
CONFIG_SPLIT_PTLOCK_CPUS=4
# CONFIG_RESOURCES_64BIT is not set
CONFIG_ZONE_DMA_FLAG=1
@@ -159,10 +174,6 @@ CONFIG_VIRT_TO_BUS=y
# CONFIG_ARCH_SUPPORTS_MSI is not set
#
-# PCCARD (PCMCIA/CardBus) support
-#
-
-#
# Executable file formats
#
CONFIG_BINFMT_FLAT=y
@@ -205,6 +216,7 @@ CONFIG_IP_FIB_HASH=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_CUBIC=y
@@ -229,10 +241,6 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
# CONFIG_LAPB is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
-
-#
-# QoS and/or fair queueing
-#
# CONFIG_NET_SCHED is not set
#
@@ -240,6 +248,7 @@ CONFIG_DEFAULT_TCP_CONG="cubic"
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
# CONFIG_AF_RXRPC is not set
@@ -283,6 +292,7 @@ CONFIG_MTD_BLOCK=y
# CONFIG_INFTL is not set
# CONFIG_RFD_FTL is not set
# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
#
# RAM/ROM/Flash chip drivers
@@ -339,10 +349,11 @@ CONFIG_BLK_DEV=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
-CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
+# CONFIG_BLK_DEV_XIP is not set
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_ATA_OVER_ETH is not set
# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
# CONFIG_IDE is not set
#
@@ -360,9 +371,15 @@ CONFIG_NETDEVICES=y
# CONFIG_MACVLAN is not set
# CONFIG_EQUALIZER is not set
# CONFIG_TUN is not set
+# CONFIG_VETH is not set
# CONFIG_PHYLIB is not set
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_B44 is not set
CONFIG_FEC=y
# CONFIG_FEC2 is not set
# CONFIG_NETDEV_1000 is not set
@@ -377,7 +394,7 @@ CONFIG_FEC=y
CONFIG_PPP=y
# CONFIG_PPP_MULTILINK is not set
# CONFIG_PPP_FILTER is not set
-# CONFIG_PPP_ASYNC is not set
+CONFIG_PPP_ASYNC=y
# CONFIG_PPP_SYNC_TTY is not set
# CONFIG_PPP_DEFLATE is not set
# CONFIG_PPP_BSDCOMP is not set
@@ -386,7 +403,6 @@ CONFIG_PPP=y
# CONFIG_PPPOL2TP is not set
# CONFIG_SLIP is not set
CONFIG_SLHC=y
-# CONFIG_SHAPER is not set
# CONFIG_NETCONSOLE is not set
# CONFIG_NETPOLL is not set
# CONFIG_NET_POLL_CONTROLLER is not set
@@ -418,12 +434,16 @@ CONFIG_SLHC=y
#
# Non-8250 serial port support
#
-CONFIG_SERIAL_COLDFIRE=y
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_COLDFIRE is not set
+CONFIG_SERIAL_MCF=y
+CONFIG_SERIAL_MCF_BAUDRATE=19200
+CONFIG_SERIAL_MCF_CONSOLE=y
# CONFIG_UNIX98_PTYS is not set
CONFIG_LEGACY_PTYS=y
CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_IPMI_HANDLER is not set
-# CONFIG_WATCHDOG is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_GEN_RTC is not set
# CONFIG_R3964 is not set
@@ -439,6 +459,14 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_W1 is not set
# CONFIG_POWER_SUPPLY is not set
# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
#
# Multifunction device drivers
@@ -450,20 +478,20 @@ CONFIG_LEGACY_PTY_COUNT=256
#
# CONFIG_VIDEO_DEV is not set
# CONFIG_DVB_CORE is not set
-CONFIG_DAB=y
+# CONFIG_DAB is not set
#
# Graphics support
#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
#
# Display device support
#
# CONFIG_DISPLAY_SUPPORT is not set
-# CONFIG_VGASTATE is not set
-CONFIG_VIDEO_OUTPUT_CONTROL=y
-# CONFIG_FB is not set
#
# Sound
@@ -471,23 +499,11 @@ CONFIG_VIDEO_OUTPUT_CONTROL=y
# CONFIG_SOUND is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
# CONFIG_NEW_LEDS is not set
# CONFIG_RTC_CLASS is not set
#
-# DMA Engine support
-#
-# CONFIG_DMA_ENGINE is not set
-
-#
-# DMA Clients
-#
-
-#
-# DMA Devices
-#
-
-#
# Userspace I/O
#
# CONFIG_UIO is not set
@@ -505,11 +521,9 @@ CONFIG_EXT2_FS=y
# CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set
-# CONFIG_MINIX_FS is not set
-CONFIG_ROMFS_FS=y
+# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY is not set
# CONFIG_QUOTA is not set
-# CONFIG_DNOTIFY is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
# CONFIG_FUSE_FS is not set
@@ -535,7 +549,6 @@ CONFIG_PROC_SYSCTL=y
CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
-CONFIG_RAMFS=y
# CONFIG_CONFIGFS_FS is not set
#
@@ -551,42 +564,27 @@ CONFIG_RAMFS=y
# CONFIG_JFFS2_FS is not set
# CONFIG_CRAMFS is not set
# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
# CONFIG_HPFS_FS is not set
# CONFIG_QNX4FS_FS is not set
+CONFIG_ROMFS_FS=y
# CONFIG_SYSV_FS is not set
# CONFIG_UFS_FS is not set
-
-#
-# Network File Systems
-#
-# CONFIG_NFS_FS is not set
-# CONFIG_NFSD is not set
-# CONFIG_SMB_FS is not set
-# CONFIG_CIFS is not set
-# CONFIG_NCP_FS is not set
-# CONFIG_CODA_FS is not set
-# CONFIG_AFS_FS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
#
# Partition Types
#
# CONFIG_PARTITION_ADVANCED is not set
CONFIG_MSDOS_PARTITION=y
-
-#
-# Native Language Support
-#
# CONFIG_NLS is not set
-
-#
-# Distributed Lock Manager
-#
# CONFIG_DLM is not set
#
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
# CONFIG_ENABLE_MUST_CHECK is not set
# CONFIG_MAGIC_SYSRQ is not set
# CONFIG_UNUSED_SYMBOLS is not set
@@ -594,6 +592,7 @@ CONFIG_MSDOS_PARTITION=y
# CONFIG_HEADERS_CHECK is not set
# CONFIG_DEBUG_KERNEL is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_SAMPLES is not set
# CONFIG_FULLDEBUG is not set
# CONFIG_HIGHPROFILE is not set
# CONFIG_BOOTPARAM is not set
@@ -605,6 +604,7 @@ CONFIG_MSDOS_PARTITION=y
#
# CONFIG_KEYS is not set
# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
# CONFIG_CRYPTO is not set
#
diff --git a/arch/m68knommu/kernel/syscalltable.S b/arch/m68knommu/kernel/syscalltable.S
index 1b02b882006..fca2e49917a 100644
--- a/arch/m68knommu/kernel/syscalltable.S
+++ b/arch/m68knommu/kernel/syscalltable.S
@@ -336,9 +336,11 @@ ENTRY(sys_call_table)
.long sys_epoll_pwait /* 315 */
.long sys_utimensat
.long sys_signalfd
- .long sys_ni_syscall
+ .long sys_timerfd_create
.long sys_eventfd
.long sys_fallocate /* 320 */
+ .long sys_timerfd_settime
+ .long sys_timerfd_gettime
.rept NR_syscalls-(.-sys_call_table)/4
.long sys_ni_syscall
diff --git a/arch/m68knommu/platform/68328/timers.c b/arch/m68knommu/platform/68328/timers.c
index 9159fd05c9a..6bafefa546e 100644
--- a/arch/m68knommu/platform/68328/timers.c
+++ b/arch/m68knommu/platform/68328/timers.c
@@ -67,16 +67,6 @@ static irqreturn_t hw_tick(int irq, void *dummy)
/***************************************************************************/
-static irqreturn_t hw_tick(int irq, void *dummy)
-{
- /* Reset Timer1 */
- TSTAT &= 0;
-
- return arch_timer_interrupt(irq, dummy);
-}
-
-/***************************************************************************/
-
static struct irqaction m68328_timer_irq = {
.name = "timer",
.flags = IRQF_DISABLED | IRQF_TIMER,
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4a88cf7695b..53800b80a20 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,7 +21,7 @@ config X86
select HAVE_IDE
select HAVE_OPROFILE
select HAVE_KPROBES
- select HAVE_KVM
+ select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
config GENERIC_LOCKBREAK
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 763dfc40723..60fe8015756 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -132,7 +132,7 @@ int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
if (!cpu_has_fxsr)
return -ENODEV;
- unlazy_fpu(target);
+ init_fpu(target);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.i387.fxsave, 0, -1);
@@ -147,7 +147,7 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
if (!cpu_has_fxsr)
return -ENODEV;
- unlazy_fpu(target);
+ init_fpu(target);
set_stopped_child_used_math(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -307,7 +307,7 @@ int fpregs_get(struct task_struct *target, const struct user_regset *regset,
if (!HAVE_HWFP)
return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
- unlazy_fpu(target);
+ init_fpu(target);
if (!cpu_has_fxsr)
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -332,7 +332,7 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset,
if (!HAVE_HWFP)
return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
- unlazy_fpu(target);
+ init_fpu(target);
set_stopped_child_used_math(target);
if (!cpu_has_fxsr)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 2cbee9479ce..68a6b151193 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -647,6 +647,10 @@ static void start_apic_timer(struct kvm_lapic *apic)
apic->timer.period = apic_get_reg(apic, APIC_TMICT) *
APIC_BUS_CYCLE_NS * apic->timer.divide_count;
atomic_set(&apic->timer.pending, 0);
+
+ if (!apic->timer.period)
+ return;
+
hrtimer_start(&apic->timer.dev,
ktime_add_ns(now, apic->timer.period),
HRTIMER_MODE_ABS);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8efdcdbebb0..d8172aabc66 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -681,8 +681,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
unsigned level,
int metaphysical,
unsigned access,
- u64 *parent_pte,
- bool *new_page)
+ u64 *parent_pte)
{
union kvm_mmu_page_role role;
unsigned index;
@@ -722,8 +721,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
vcpu->arch.mmu.prefetch_page(vcpu, sp);
if (!metaphysical)
rmap_write_protect(vcpu->kvm, gfn);
- if (new_page)
- *new_page = 1;
return sp;
}
@@ -876,11 +873,18 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
+ struct page *page;
+
gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
if (gpa == UNMAPPED_GVA)
return NULL;
- return gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+
+ down_read(&current->mm->mmap_sem);
+ page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+ up_read(&current->mm->mmap_sem);
+
+ return page;
}
static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
@@ -999,8 +1003,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
>> PAGE_SHIFT;
new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
v, level - 1,
- 1, ACC_ALL, &table[index],
- NULL);
+ 1, ACC_ALL, &table[index]);
if (!new_table) {
pgprintk("nonpaging_map: ENOMEM\n");
kvm_release_page_clean(page);
@@ -1020,15 +1023,18 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
struct page *page;
+ down_read(&vcpu->kvm->slots_lock);
+
down_read(&current->mm->mmap_sem);
page = gfn_to_page(vcpu->kvm, gfn);
+ up_read(&current->mm->mmap_sem);
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_free_some_pages(vcpu);
r = __nonpaging_map(vcpu, v, write, gfn, page);
spin_unlock(&vcpu->kvm->mmu_lock);
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
return r;
}
@@ -1090,7 +1096,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
ASSERT(!VALID_PAGE(root));
sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
- PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
+ PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
root = __pa(sp->spt);
++sp->root_count;
vcpu->arch.mmu.root_hpa = root;
@@ -1111,7 +1117,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
root_gfn = 0;
sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
PT32_ROOT_LEVEL, !is_paging(vcpu),
- ACC_ALL, NULL, NULL);
+ ACC_ALL, NULL);
root = __pa(sp->spt);
++sp->root_count;
vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
@@ -1172,7 +1178,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
- pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
+ pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
mmu_free_roots(vcpu);
}
@@ -1362,6 +1368,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
gfn_t gfn;
int r;
u64 gpte = 0;
+ struct page *page;
if (bytes != 4 && bytes != 8)
return;
@@ -1389,6 +1396,11 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
if (!is_present_pte(gpte))
return;
gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+
+ down_read(&current->mm->mmap_sem);
+ page = gfn_to_page(vcpu->kvm, gfn);
+ up_read(&current->mm->mmap_sem);
+
vcpu->arch.update_pte.gfn = gfn;
vcpu->arch.update_pte.page = gfn_to_page(vcpu->kvm, gfn);
}
@@ -1496,9 +1508,9 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
gpa_t gpa;
int r;
- down_read(&current->mm->mmap_sem);
+ down_read(&vcpu->kvm->slots_lock);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
spin_lock(&vcpu->kvm->mmu_lock);
r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 03ba8608fe0..ecc0856268c 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -91,7 +91,10 @@ static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
pt_element_t *table;
struct page *page;
+ down_read(&current->mm->mmap_sem);
page = gfn_to_page(kvm, table_gfn);
+ up_read(&current->mm->mmap_sem);
+
table = kmap_atomic(page, KM_USER0);
ret = CMPXCHG(&table[index], orig_pte, new_pte);
@@ -140,7 +143,7 @@ walk:
}
#endif
ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
- (vcpu->cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
+ (vcpu->arch.cr3 & CR3_NONPAE_RESERVED_BITS) == 0);
pt_access = ACC_ALL;
@@ -297,7 +300,6 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
u64 shadow_pte;
int metaphysical;
gfn_t table_gfn;
- bool new_page = 0;
shadow_ent = ((u64 *)__va(shadow_addr)) + index;
if (level == PT_PAGE_TABLE_LEVEL)
@@ -319,8 +321,8 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
}
shadow_page = kvm_mmu_get_page(vcpu, table_gfn, addr, level-1,
metaphysical, access,
- shadow_ent, &new_page);
- if (new_page && !metaphysical) {
+ shadow_ent);
+ if (!metaphysical) {
int r;
pt_element_t curr_pte;
r = kvm_read_guest_atomic(vcpu->kvm,
@@ -378,7 +380,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
if (r)
return r;
- down_read(&current->mm->mmap_sem);
+ down_read(&vcpu->kvm->slots_lock);
/*
* Look up the shadow pte for the faulting address.
*/
@@ -392,11 +394,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
pgprintk("%s: guest page fault\n", __FUNCTION__);
inject_page_fault(vcpu, addr, walker.error_code);
vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
return 0;
}
+ down_read(&current->mm->mmap_sem);
page = gfn_to_page(vcpu->kvm, walker.gfn);
+ up_read(&current->mm->mmap_sem);
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_free_some_pages(vcpu);
@@ -413,14 +417,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
*/
if (shadow_pte && is_io_pte(*shadow_pte)) {
spin_unlock(&vcpu->kvm->mmu_lock);
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
return 1;
}
++vcpu->stat.pf_fixed;
kvm_mmu_audit(vcpu, "post page fault (fixed)");
spin_unlock(&vcpu->kvm->mmu_lock);
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
return write_pt;
}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index de755cb1431..1a582f1090e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -792,6 +792,10 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
vcpu->arch.cr0 = cr0;
cr0 |= X86_CR0_PG | X86_CR0_WP;
cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
+ if (!vcpu->fpu_active) {
+ svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
+ cr0 |= X86_CR0_TS;
+ }
svm->vmcb->save.cr0 = cr0;
}
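[Setting CR0.TS and intercepting #NM while the guest FPU is inactive is the lazy-FPU-switch pattern: the first guest FPU instruction traps, and only then is guest FPU state loaded. A hedged sketch of the other half; KVM's real #NM handler differs in detail:

    static int nm_exit_sketch(struct vcpu_svm *svm)
    {
            /* stop intercepting #NM and let the guest touch the FPU */
            svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
            svm->vmcb->save.cr0 &= ~X86_CR0_TS;
            svm->vcpu.fpu_active = 1;
            /* ...swap host FPU state out, guest FPU state in... */
            return 1;
    }
]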
@@ -1096,6 +1100,24 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
case MSR_IA32_SYSENTER_ESP:
*data = svm->vmcb->save.sysenter_esp;
break;
+ /* Nobody will change the following 5 values in the VMCB so
+ we can safely return them on rdmsr. They will always be 0
+ until LBRV is implemented. */
+ case MSR_IA32_DEBUGCTLMSR:
+ *data = svm->vmcb->save.dbgctl;
+ break;
+ case MSR_IA32_LASTBRANCHFROMIP:
+ *data = svm->vmcb->save.br_from;
+ break;
+ case MSR_IA32_LASTBRANCHTOIP:
+ *data = svm->vmcb->save.br_to;
+ break;
+ case MSR_IA32_LASTINTFROMIP:
+ *data = svm->vmcb->save.last_excp_from;
+ break;
+ case MSR_IA32_LASTINTTOIP:
+ *data = svm->vmcb->save.last_excp_to;
+ break;
default:
return kvm_get_msr_common(vcpu, ecx, data);
}
@@ -1156,6 +1178,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
case MSR_IA32_SYSENTER_ESP:
svm->vmcb->save.sysenter_esp = data;
break;
+ case MSR_IA32_DEBUGCTLMSR:
+ pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
+ __FUNCTION__, data);
+ break;
case MSR_K7_EVNTSEL0:
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ad36447e696..94ea724638f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -638,6 +638,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
{
int save_nmsrs;
+ vmx_load_host_state(vmx);
save_nmsrs = 0;
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu)) {
@@ -1477,7 +1478,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
struct kvm_userspace_memory_region kvm_userspace_mem;
int r = 0;
- down_write(&current->mm->mmap_sem);
+ down_write(&kvm->slots_lock);
if (kvm->arch.apic_access_page)
goto out;
kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
@@ -1487,9 +1488,12 @@ static int alloc_apic_access_page(struct kvm *kvm)
r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
if (r)
goto out;
+
+ down_read(&current->mm->mmap_sem);
kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
+ up_read(&current->mm->mmap_sem);
out:
- up_write(&current->mm->mmap_sem);
+ up_write(&kvm->slots_lock);
return r;
}
@@ -1602,9 +1606,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
- if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
- if (alloc_apic_access_page(vmx->vcpu.kvm) != 0)
- return -ENOMEM;
return 0;
}
@@ -2534,6 +2535,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
put_cpu();
if (err)
goto free_vmcs;
+ if (vm_need_virtualize_apic_accesses(kvm))
+ if (alloc_apic_access_page(kvm) != 0)
+ goto free_vmcs;
return &vmx->vcpu;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cf530814868..6b01552bd1f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -46,6 +46,9 @@
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
+ struct kvm_cpuid_entry2 __user *entries);
+
struct kvm_x86_ops *kvm_x86_ops;
struct kvm_stats_debugfs_item debugfs_entries[] = {
@@ -181,7 +184,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
int ret;
u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
- down_read(&current->mm->mmap_sem);
+ down_read(&vcpu->kvm->slots_lock);
ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
offset * sizeof(u64), sizeof(pdpte));
if (ret < 0) {
@@ -198,7 +201,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
out:
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
return ret;
}
@@ -212,13 +215,13 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
if (is_long_mode(vcpu) || !is_pae(vcpu))
return false;
- down_read(&current->mm->mmap_sem);
+ down_read(&vcpu->kvm->slots_lock);
r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
if (r < 0)
goto out;
changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
out:
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
return changed;
}
@@ -356,7 +359,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
*/
}
- down_read(&current->mm->mmap_sem);
+ down_read(&vcpu->kvm->slots_lock);
/*
* Does the new cr3 value map to physical memory? (Note, we
* catch an invalid cr3 even in real-mode, because it would
@@ -372,7 +375,7 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
vcpu->arch.cr3 = cr3;
vcpu->arch.mmu.new_cr3(vcpu);
}
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
}
EXPORT_SYMBOL_GPL(set_cr3);
@@ -484,6 +487,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
__FUNCTION__, data);
break;
+ case MSR_IA32_MCG_CTL:
+ pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
+ __FUNCTION__, data);
+ break;
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
case 0x200 ... 0x2ff: /* MTRRs */
@@ -526,6 +533,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_MC0_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MCG_CAP:
+ case MSR_IA32_MCG_CTL:
case MSR_IA32_MC0_MISC:
case MSR_IA32_MC0_MISC+4:
case MSR_IA32_MC0_MISC+8:
@@ -727,6 +735,24 @@ long kvm_arch_dev_ioctl(struct file *filp,
r = 0;
break;
}
+ case KVM_GET_SUPPORTED_CPUID: {
+ struct kvm_cpuid2 __user *cpuid_arg = argp;
+ struct kvm_cpuid2 cpuid;
+
+ r = -EFAULT;
+ if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
+ goto out;
+ r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
+ cpuid_arg->entries);
+ if (r)
+ goto out;
+
+ r = -EFAULT;
+ if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
+ goto out;
+ r = 0;
+ break;
+ }
default:
r = -EINVAL;
}
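[With this hunk, and the matching removal from kvm_arch_vm_ioctl below, KVM_GET_SUPPORTED_CPUID becomes a system ioctl issued on the /dev/kvm fd rather than on a VM fd. A hedged userspace sketch, error handling elided:

    struct {
            struct kvm_cpuid2 hdr;
            struct kvm_cpuid_entry2 entries[64];
    } buf = { .hdr = { .nent = 64 } };

    int sys_fd = open("/dev/kvm", O_RDWR);
    ioctl(sys_fd, KVM_GET_SUPPORTED_CPUID, &buf.hdr);
    /* buf.hdr.nent now holds the number of valid entries[] */
]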
@@ -974,8 +1000,7 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
put_cpu();
}
-static int kvm_vm_ioctl_get_supported_cpuid(struct kvm *kvm,
- struct kvm_cpuid2 *cpuid,
+static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
struct kvm_cpuid_entry2 __user *entries)
{
struct kvm_cpuid_entry2 *cpuid_entries;
@@ -1207,12 +1232,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
return -EINVAL;
- down_write(&current->mm->mmap_sem);
+ down_write(&kvm->slots_lock);
kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
- up_write(&current->mm->mmap_sem);
+ up_write(&kvm->slots_lock);
return 0;
}
@@ -1261,7 +1286,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
< alias->target_phys_addr)
goto out;
- down_write(&current->mm->mmap_sem);
+ down_write(&kvm->slots_lock);
p = &kvm->arch.aliases[alias->slot];
p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
@@ -1275,7 +1300,7 @@ static int kvm_vm_ioctl_set_memory_alias(struct kvm *kvm,
kvm_mmu_zap_all(kvm);
- up_write(&current->mm->mmap_sem);
+ up_write(&kvm->slots_lock);
return 0;
@@ -1351,7 +1376,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
struct kvm_memory_slot *memslot;
int is_dirty = 0;
- down_write(&current->mm->mmap_sem);
+ down_write(&kvm->slots_lock);
r = kvm_get_dirty_log(kvm, log, &is_dirty);
if (r)
@@ -1367,7 +1392,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
}
r = 0;
out:
- up_write(&current->mm->mmap_sem);
+ up_write(&kvm->slots_lock);
return r;
}
@@ -1487,24 +1512,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = 0;
break;
}
- case KVM_GET_SUPPORTED_CPUID: {
- struct kvm_cpuid2 __user *cpuid_arg = argp;
- struct kvm_cpuid2 cpuid;
-
- r = -EFAULT;
- if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
- goto out;
- r = kvm_vm_ioctl_get_supported_cpuid(kvm, &cpuid,
- cpuid_arg->entries);
- if (r)
- goto out;
-
- r = -EFAULT;
- if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
- goto out;
- r = 0;
- break;
- }
default:
;
}
@@ -1563,7 +1570,7 @@ int emulator_read_std(unsigned long addr,
void *data = val;
int r = X86EMUL_CONTINUE;
- down_read(&current->mm->mmap_sem);
+ down_read(&vcpu->kvm->slots_lock);
while (bytes) {
gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
unsigned offset = addr & (PAGE_SIZE-1);
@@ -1585,7 +1592,7 @@ int emulator_read_std(unsigned long addr,
addr += tocopy;
}
out:
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
return r;
}
EXPORT_SYMBOL_GPL(emulator_read_std);
@@ -1604,9 +1611,9 @@ static int emulator_read_emulated(unsigned long addr,
return X86EMUL_CONTINUE;
}
- down_read(&current->mm->mmap_sem);
+ down_read(&vcpu->kvm->slots_lock);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
/* For APIC access vmexit */
if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1644,14 +1651,14 @@ static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
{
int ret;
- down_read(&current->mm->mmap_sem);
+ down_read(&vcpu->kvm->slots_lock);
ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
if (ret < 0) {
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
return 0;
}
kvm_mmu_pte_write(vcpu, gpa, val, bytes);
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
return 1;
}
@@ -1663,9 +1670,9 @@ static int emulator_write_emulated_onepage(unsigned long addr,
struct kvm_io_device *mmio_dev;
gpa_t gpa;
- down_read(&current->mm->mmap_sem);
+ down_read(&vcpu->kvm->slots_lock);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
if (gpa == UNMAPPED_GVA) {
kvm_inject_page_fault(vcpu, addr, 2);
@@ -1742,7 +1749,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
char *kaddr;
u64 val;
- down_read(&current->mm->mmap_sem);
+ down_read(&vcpu->kvm->slots_lock);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
if (gpa == UNMAPPED_GVA ||
@@ -1753,13 +1760,17 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
goto emul_write;
val = *(u64 *)new;
+
+ down_read(&current->mm->mmap_sem);
page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+ up_read(&current->mm->mmap_sem);
+
kaddr = kmap_atomic(page, KM_USER0);
set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
kunmap_atomic(kaddr, KM_USER0);
kvm_release_page_dirty(page);
emul_write:
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
}
#endif
@@ -2152,10 +2163,10 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
kvm_x86_ops->skip_emulated_instruction(vcpu);
for (i = 0; i < nr_pages; ++i) {
- down_read(&current->mm->mmap_sem);
+ down_read(&vcpu->kvm->slots_lock);
page = gva_to_page(vcpu, address + i * PAGE_SIZE);
vcpu->arch.pio.guest_pages[i] = page;
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
if (!page) {
kvm_inject_gp(vcpu, 0);
free_pio_guest_pages(vcpu);
@@ -2478,8 +2489,9 @@ static void vapic_enter(struct kvm_vcpu *vcpu)
down_read(&current->mm->mmap_sem);
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
- vcpu->arch.apic->vapic_page = page;
up_read(&current->mm->mmap_sem);
+
+ vcpu->arch.apic->vapic_page = page;
}
static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -2861,8 +2873,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
kvm_x86_ops->decache_cr4_guest_bits(vcpu);
mmu_reset_needed |= vcpu->arch.cr0 != sregs->cr0;
- vcpu->arch.cr0 = sregs->cr0;
kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
+ vcpu->arch.cr0 = sregs->cr0;
mmu_reset_needed |= vcpu->arch.cr4 != sregs->cr4;
kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
@@ -2952,9 +2964,9 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
gpa_t gpa;
vcpu_load(vcpu);
- down_read(&current->mm->mmap_sem);
+ down_read(&vcpu->kvm->slots_lock);
gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
- up_read(&current->mm->mmap_sem);
+ up_read(&vcpu->kvm->slots_lock);
tr->physical_address = gpa;
tr->valid = gpa != UNMAPPED_GVA;
tr->writeable = 1;
@@ -3227,11 +3239,13 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
*/
if (!user_alloc) {
if (npages && !old.rmap) {
+ down_write(&current->mm->mmap_sem);
memslot->userspace_addr = do_mmap(NULL, 0,
npages * PAGE_SIZE,
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS,
0);
+ up_write(&current->mm->mmap_sem);
if (IS_ERR((void *)memslot->userspace_addr))
return PTR_ERR((void *)memslot->userspace_addr);
@@ -3239,8 +3253,10 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
if (!old.user_alloc && old.rmap) {
int ret;
+ down_write(&current->mm->mmap_sem);
ret = do_munmap(current->mm, old.userspace_addr,
old.npages * PAGE_SIZE);
+ up_write(&current->mm->mmap_sem);
if (ret < 0)
printk(KERN_WARNING
"kvm_vm_ioctl_set_memory_region: "
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 59898fb0a4a..8ccfee10f5b 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -622,13 +622,17 @@ void __init init_cpu_to_node(void)
int i;
for (i = 0; i < NR_CPUS; i++) {
+ int node;
u16 apicid = x86_cpu_to_apicid_init[i];
if (apicid == BAD_APICID)
continue;
- if (apicid_to_node[apicid] == NUMA_NO_NODE)
+ node = apicid_to_node[apicid];
+ if (node == NUMA_NO_NODE)
continue;
- numa_set_node(i, apicid_to_node[apicid]);
+ if (!node_online(node))
+ continue;
+ numa_set_node(i, node);
}
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 3bad4773a2f..2341492bf7a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -38,7 +38,8 @@ char * __init xen_memory_setup(void)
unsigned long max_pfn = xen_start_info->nr_pages;
e820.nr_map = 0;
- add_memory_region(0, PFN_PHYS(max_pfn), E820_RAM);
+ add_memory_region(0, LOWMEMSIZE(), E820_RAM);
+ add_memory_region(HIGH_MEMORY, PFN_PHYS(max_pfn)-HIGH_MEMORY, E820_RAM);
return "Xen";
}
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6901eedeffc..55c5f1fc4f1 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -259,8 +259,11 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
static void bio_end_empty_barrier(struct bio *bio, int err)
{
- if (err)
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
clear_bit(BIO_UPTODATE, &bio->bi_flags);
+ }
complete(bio->bi_private);
}
@@ -309,7 +312,9 @@ int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
*error_sector = bio->bi_sector;
ret = 0;
- if (!bio_flagged(bio, BIO_UPTODATE))
+ if (bio_flagged(bio, BIO_EOPNOTSUPP))
+ ret = -EOPNOTSUPP;
+ else if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;
bio_put(bio);
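[With BIO_EOPNOTSUPP propagated, a caller of blkdev_issue_flush() can now tell "the device has no cache flush" apart from a genuine I/O error. A hedged sketch of caller-side handling; the fallback policy is illustrative, not part of this patch:

    ret = blkdev_issue_flush(bdev, NULL);
    if (ret == -EOPNOTSUPP)
            ret = 0;        /* no flush support: nothing to do */
    else if (ret)
            return ret;     /* real I/O failure */
]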
diff --git a/block/blk-core.c b/block/blk-core.c
index 775c8516abf..2a438a93f72 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -127,7 +127,6 @@ void rq_init(struct request_queue *q, struct request *rq)
rq->nr_hw_segments = 0;
rq->ioprio = 0;
rq->special = NULL;
- rq->raw_data_len = 0;
rq->buffer = NULL;
rq->tag = -1;
rq->errors = 0;
@@ -135,6 +134,7 @@ void rq_init(struct request_queue *q, struct request *rq)
rq->cmd_len = 0;
memset(rq->cmd, 0, sizeof(rq->cmd));
rq->data_len = 0;
+ rq->extra_len = 0;
rq->sense_len = 0;
rq->data = NULL;
rq->sense = NULL;
@@ -424,7 +424,6 @@ void blk_put_queue(struct request_queue *q)
{
kobject_put(&q->kobj);
}
-EXPORT_SYMBOL(blk_put_queue);
void blk_cleanup_queue(struct request_queue *q)
{
@@ -592,7 +591,6 @@ int blk_get_queue(struct request_queue *q)
return 1;
}
-EXPORT_SYMBOL(blk_get_queue);
static inline void blk_free_request(struct request_queue *q, struct request *rq)
{
@@ -1768,6 +1766,7 @@ static inline void __end_request(struct request *rq, int uptodate,
/**
* blk_rq_bytes - Returns bytes left to complete in the entire request
+ * @rq: the request being processed
**/
unsigned int blk_rq_bytes(struct request *rq)
{
@@ -1780,6 +1779,7 @@ EXPORT_SYMBOL_GPL(blk_rq_bytes);
/**
* blk_rq_cur_bytes - Returns bytes left to complete in the current segment
+ * @rq: the request being processed
**/
unsigned int blk_rq_cur_bytes(struct request *rq)
{
@@ -2016,7 +2016,6 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->hard_cur_sectors = rq->current_nr_sectors;
rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
rq->buffer = bio_data(bio);
- rq->raw_data_len = bio->bi_size;
rq->data_len = bio->bi_size;
rq->bio = rq->biotail = bio;
diff --git a/block/blk-map.c b/block/blk-map.c
index 09f7fd0bcb7..c07d9c8317f 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -19,7 +19,6 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
rq->biotail->bi_next = bio;
rq->biotail = bio;
- rq->raw_data_len += bio->bi_size;
rq->data_len += bio->bi_size;
}
return 0;
@@ -44,6 +43,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
void __user *ubuf, unsigned int len)
{
unsigned long uaddr;
+ unsigned int alignment;
struct bio *bio, *orig_bio;
int reading, ret;
@@ -54,8 +54,8 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
* direct dma. else, set up kernel bounce buffers
*/
uaddr = (unsigned long) ubuf;
- if (!(uaddr & queue_dma_alignment(q)) &&
- !(len & queue_dma_alignment(q)))
+ alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+ if (!(uaddr & alignment) && !(len & alignment))
bio = bio_map_user(q, NULL, uaddr, len, reading);
else
bio = bio_copy_user(q, uaddr, len, reading);
@@ -142,20 +142,22 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
/*
* __blk_rq_map_user() copies the buffers if starting address
- * or length isn't aligned. As the copied buffer is always
- * page aligned, we know that there's enough room for padding.
- * Extend the last bio and update rq->data_len accordingly.
+ * or length isn't aligned to dma_pad_mask. As the copied
+ * buffer is always page aligned, we know that there's enough
+ * room for padding. Extend the last bio and account the
+ * extra bytes in rq->extra_len.
*
* On unmap, bio_uncopy_user() will use unmodified
* bio_map_data pointed to by bio->bi_private.
*/
- if (len & queue_dma_alignment(q)) {
- unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1;
- struct bio *bio = rq->biotail;
+ if (len & q->dma_pad_mask) {
+ unsigned int pad_len = (q->dma_pad_mask & ~len) + 1;
+ struct bio *tail = rq->biotail;
- bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len;
- bio->bi_size += pad_len;
- rq->data_len += pad_len;
+ tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len;
+ tail->bi_size += pad_len;
+
+ rq->extra_len += pad_len;
}
rq->buffer = rq->data = NULL;
@@ -215,7 +217,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
rq->buffer = rq->data = NULL;
return 0;
}
-EXPORT_SYMBOL(blk_rq_map_user_iov);
/**
* blk_rq_unmap_user - unmap a request with user data
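[A worked instance of the pad computation above, assuming q->dma_pad_mask = 3 (pad to 4 bytes) and len = 13:

    pad_len = (q->dma_pad_mask & ~len) + 1
            = (0b0011 & ...0010) + 1        /* ~13 ends in ...0010 */
            = 2 + 1 = 3

so the tail bio grows by 3 bytes and the transfer becomes 16 bytes, the next multiple of 4, while rq->data_len still reports the original 13. When len is already aligned, the (len & q->dma_pad_mask) gate skips the whole block.]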
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 7506c4fe026..0f58616bcd7 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -231,7 +231,7 @@ new_segment:
((unsigned long)q->dma_drain_buffer) &
(PAGE_SIZE - 1));
nsegs++;
- rq->data_len += q->dma_drain_size;
+ rq->extra_len += q->dma_drain_size;
}
if (sg)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 9a8ffdd0ce3..1344a0ea5cc 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -140,7 +140,7 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr)
/* Assume anything <= 4GB can be handled by IOMMU.
Actually some IOMMUs can handle everything, but I don't
know of a way to test this here. */
- if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
+ if (b_pfn <= (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
dma = 1;
q->bounce_pfn = max_low_pfn;
#else
@@ -293,8 +293,24 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
EXPORT_SYMBOL(blk_queue_stack_limits);
/**
- * blk_queue_dma_drain - Set up a drain buffer for excess dma.
+ * blk_queue_dma_pad - set pad mask
+ * @q: the request queue for the device
+ * @mask: pad mask
+ *
+ * Set pad mask. Direct IO requests are padded to the mask specified.
*
+ * Padding a request extends its last bio, but the extra bytes are
+ * accounted in rq->extra_len rather than rq->data_len, so ->data_len
+ * continues to report the originally requested length.
+ **/
+void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
+{
+ q->dma_pad_mask = mask;
+}
+EXPORT_SYMBOL(blk_queue_dma_pad);
+
+/**
+ * blk_queue_dma_drain - Set up a drain buffer for excess dma.
* @q: the request queue for the device
* @dma_drain_needed: fn which returns non-zero if drain is necessary
* @buf: physically contiguous buffer
@@ -316,7 +332,7 @@ EXPORT_SYMBOL(blk_queue_stack_limits);
* device can support otherwise there won't be room for the drain
* buffer.
*/
-extern int blk_queue_dma_drain(struct request_queue *q,
+int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size)
{
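[A minimal sketch of a driver opting in to DMA padding, mirroring the libata hookup later in this diff; the function name and mask are illustrative:

    static int my_slave_configure(struct scsi_device *sdev)
    {
            /* pad and align direct-I/O transfers to 4 bytes */
            blk_queue_update_dma_alignment(sdev->request_queue, 4 - 1);
            blk_queue_dma_pad(sdev->request_queue, 4 - 1);
            return 0;
    }
]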
diff --git a/block/blk-tag.c b/block/blk-tag.c
index a8c37d4bbb3..4780a46ce23 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -6,6 +6,8 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include "blk.h"
+
/**
* blk_queue_find_tag - find a request by its tag and queue
* @q: The request queue for the device
diff --git a/block/blk.h b/block/blk.h
index ec898dd0c65..ec9120fb789 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -32,6 +32,8 @@ void blk_recalc_rq_sectors(struct request *rq, int nsect);
void blk_queue_congestion_threshold(struct request_queue *q);
+int blk_dev_init(void);
+
/*
* Return the threshold (number of used requests) at which the queue is
* considered to be congested. It include a little hysteresis to keep the
diff --git a/block/bsg.c b/block/bsg.c
index 7f3c09549e4..8917c5174dc 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -437,14 +437,14 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
}
if (rq->next_rq) {
- hdr->dout_resid = rq->raw_data_len;
- hdr->din_resid = rq->next_rq->raw_data_len;
+ hdr->dout_resid = rq->data_len;
+ hdr->din_resid = rq->next_rq->data_len;
blk_rq_unmap_user(bidi_bio);
blk_put_request(rq->next_rq);
} else if (rq_data_dir(rq) == READ)
- hdr->din_resid = rq->raw_data_len;
+ hdr->din_resid = rq->data_len;
else
- hdr->dout_resid = rq->raw_data_len;
+ hdr->dout_resid = rq->data_len;
/*
* If the request generated a negative error number, return it
diff --git a/block/genhd.c b/block/genhd.c
index 53f2238e69c..c44527d16c5 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -17,11 +17,15 @@
#include <linux/buffer_head.h>
#include <linux/mutex.h>
+#include "blk.h"
+
static DEFINE_MUTEX(block_class_lock);
#ifndef CONFIG_SYSFS_DEPRECATED
struct kobject *block_depr;
#endif
+static struct device_type disk_type;
+
/*
* Can be deleted altogether. Later.
*
@@ -346,8 +350,6 @@ const struct seq_operations partitions_op = {
#endif
-extern int blk_dev_init(void);
-
static struct kobject *base_probe(dev_t devt, int *part, void *data)
{
if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0)
@@ -502,7 +504,7 @@ struct class block_class = {
.name = "block",
};
-struct device_type disk_type = {
+static struct device_type disk_type = {
.name = "disk",
.groups = disk_attr_groups,
.release = disk_release,
@@ -632,12 +634,14 @@ static void media_change_notify_thread(struct work_struct *work)
put_device(gd->driverfs_dev);
}
+#if 0
void genhd_media_change_notify(struct gendisk *disk)
{
get_device(disk->driverfs_dev);
schedule_work(&disk->async_notify);
}
EXPORT_SYMBOL_GPL(genhd_media_change_notify);
+#endif /* 0 */
dev_t blk_lookup_devt(const char *name)
{
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index e993cac4911..a2c3a936ebf 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -266,7 +266,7 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
hdr->info = 0;
if (hdr->masked_status || hdr->host_status || hdr->driver_status)
hdr->info |= SG_INFO_CHECK;
- hdr->resid = rq->raw_data_len;
+ hdr->resid = rq->data_len;
hdr->sb_len_wr = 0;
if (rq->sense_len && hdr->sbp) {
@@ -528,8 +528,8 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
rq = blk_get_request(q, WRITE, __GFP_WAIT);
rq->cmd_type = REQ_TYPE_BLOCK_PC;
rq->data = NULL;
- rq->raw_data_len = 0;
rq->data_len = 0;
+ rq->extra_len = 0;
rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
memset(rq->cmd, 0, sizeof(rq->cmd));
rq->cmd[0] = cmd;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 7b1f1ee8131..8f0e8f2bc62 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -862,9 +862,10 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
struct request_queue *q = sdev->request_queue;
void *buf;
- /* set the min alignment */
+ /* set the min alignment and padding */
blk_queue_update_dma_alignment(sdev->request_queue,
ATA_DMA_PAD_SZ - 1);
+ blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1);
/* configure draining */
buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL);
@@ -2538,7 +2539,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
}
qc->tf.command = ATA_CMD_PACKET;
- qc->nbytes = scsi_bufflen(scmd);
+ qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len;
/* check whether ATAPI DMA is safe */
if (!using_pio && ata_check_atapi_dma(qc))
@@ -2549,7 +2550,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
* want to set it properly, and for DMA where it is
* effectively meaningless.
*/
- nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024);
+ nbytes = min(scmd->request->data_len, (unsigned int)63 * 1024);
/* Most ATAPI devices which honor transfer chunk size don't
* behave according to the spec when odd chunk size which
@@ -2875,7 +2876,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
* TODO: find out if we need to do more here to
* cover scatter/gather case.
*/
- qc->nbytes = scsi_bufflen(scmd);
+ qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len;
/* request result TF and be quiet about device error */
qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
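[The net effect of the two qc->nbytes changes: rq->data_len now carries only the bytes the issuer asked for, so the resid values reported through bsg and sg above are honest, while the controller is still programmed for every byte that will actually be DMA'd:

    qc->nbytes = scsi_bufflen(scmd)            /* real payload      */
               + scmd->request->extra_len;     /* pad + drain bytes */
]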
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 9715be3f248..55bd35c0f08 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -33,6 +33,7 @@
#include <linux/blkpg.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
@@ -131,7 +132,6 @@ static struct board_type products[] = {
/*define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3
-#define READ_AHEAD 1024
#define MAX_CTLR 32
/* Originally cciss driver only supports 8 major numbers */
@@ -174,8 +174,6 @@ static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
static void fail_all_cmds(unsigned long ctlr);
#ifdef CONFIG_PROC_FS
-static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
- int length, int *eof, void *data);
static void cciss_procinit(int i);
#else
static void cciss_procinit(int i)
@@ -240,24 +238,46 @@ static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
*/
#define ENG_GIG 1000000000
#define ENG_GIG_FACTOR (ENG_GIG/512)
+#define ENGAGE_SCSI "engage scsi"
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
"UNKNOWN"
};
static struct proc_dir_entry *proc_cciss;
-static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
- int length, int *eof, void *data)
+static void cciss_seq_show_header(struct seq_file *seq)
{
- off_t pos = 0;
- off_t len = 0;
- int size, i, ctlr;
- ctlr_info_t *h = (ctlr_info_t *) data;
- drive_info_struct *drv;
- unsigned long flags;
- sector_t vol_sz, vol_sz_frac;
+ ctlr_info_t *h = seq->private;
+
+ seq_printf(seq, "%s: HP %s Controller\n"
+ "Board ID: 0x%08lx\n"
+ "Firmware Version: %c%c%c%c\n"
+ "IRQ: %d\n"
+ "Logical drives: %d\n"
+ "Current Q depth: %d\n"
+ "Current # commands on controller: %d\n"
+ "Max Q depth since init: %d\n"
+ "Max # commands on controller since init: %d\n"
+ "Max SG entries since init: %d\n",
+ h->devname,
+ h->product_name,
+ (unsigned long)h->board_id,
+ h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
+ h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
+ h->num_luns,
+ h->Qdepth, h->commands_outstanding,
+ h->maxQsinceinit, h->max_outstanding, h->maxSG);
- ctlr = h->ctlr;
+#ifdef CONFIG_CISS_SCSI_TAPE
+ cciss_seq_tape_report(seq, h->ctlr);
+#endif /* CONFIG_CISS_SCSI_TAPE */
+}
+
+static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ ctlr_info_t *h = seq->private;
+ unsigned ctlr = h->ctlr;
+ unsigned long flags;
/* prevent displaying bogus info during configuration
* or deconfiguration of a logical volume
@@ -265,115 +285,155 @@ static int cciss_proc_get_info(char *buffer, char **start, off_t offset,
spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
if (h->busy_configuring) {
spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
- return -EBUSY;
+ return ERR_PTR(-EBUSY);
}
h->busy_configuring = 1;
spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
- size = sprintf(buffer, "%s: HP %s Controller\n"
- "Board ID: 0x%08lx\n"
- "Firmware Version: %c%c%c%c\n"
- "IRQ: %d\n"
- "Logical drives: %d\n"
- "Max sectors: %d\n"
- "Current Q depth: %d\n"
- "Current # commands on controller: %d\n"
- "Max Q depth since init: %d\n"
- "Max # commands on controller since init: %d\n"
- "Max SG entries since init: %d\n\n",
- h->devname,
- h->product_name,
- (unsigned long)h->board_id,
- h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
- h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT],
- h->num_luns,
- h->cciss_max_sectors,
- h->Qdepth, h->commands_outstanding,
- h->maxQsinceinit, h->max_outstanding, h->maxSG);
-
- pos += size;
- len += size;
- cciss_proc_tape_report(ctlr, buffer, &pos, &len);
- for (i = 0; i <= h->highest_lun; i++) {
-
- drv = &h->drv[i];
- if (drv->heads == 0)
- continue;
+ if (*pos == 0)
+ cciss_seq_show_header(seq);
- vol_sz = drv->nr_blocks;
- vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
- vol_sz_frac *= 100;
- sector_div(vol_sz_frac, ENG_GIG_FACTOR);
+ return pos;
+}
+
+static int cciss_seq_show(struct seq_file *seq, void *v)
+{
+ sector_t vol_sz, vol_sz_frac;
+ ctlr_info_t *h = seq->private;
+ unsigned ctlr = h->ctlr;
+ loff_t *pos = v;
+ drive_info_struct *drv = &h->drv[*pos];
+
+ if (*pos > h->highest_lun)
+ return 0;
+
+ if (drv->heads == 0)
+ return 0;
+
+ vol_sz = drv->nr_blocks;
+ vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
+ vol_sz_frac *= 100;
+ sector_div(vol_sz_frac, ENG_GIG_FACTOR);
+
+ if (drv->raid_level > 5)
+ drv->raid_level = RAID_UNKNOWN;
+ seq_printf(seq, "cciss/c%dd%d:"
+ "\t%4u.%02uGB\tRAID %s\n",
+ ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
+ raid_label[drv->raid_level]);
+ return 0;
+}
+
+static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ctlr_info_t *h = seq->private;
+
+ if (*pos > h->highest_lun)
+ return NULL;
+ *pos += 1;
+
+ return pos;
+}
+
+static void cciss_seq_stop(struct seq_file *seq, void *v)
+{
+ ctlr_info_t *h = seq->private;
+
+ /* Only reset h->busy_configuring if we succeeded in setting
+ * it during cciss_seq_start. */
+ if (v == ERR_PTR(-EBUSY))
+ return;
- if (drv->raid_level > 5)
- drv->raid_level = RAID_UNKNOWN;
- size = sprintf(buffer + len, "cciss/c%dd%d:"
- "\t%4u.%02uGB\tRAID %s\n",
- ctlr, i, (int)vol_sz, (int)vol_sz_frac,
- raid_label[drv->raid_level]);
- pos += size;
- len += size;
- }
-
- *eof = 1;
- *start = buffer + offset;
- len -= offset;
- if (len > length)
- len = length;
h->busy_configuring = 0;
- return len;
}
-static int
-cciss_proc_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static struct seq_operations cciss_seq_ops = {
+ .start = cciss_seq_start,
+ .show = cciss_seq_show,
+ .next = cciss_seq_next,
+ .stop = cciss_seq_stop,
+};
+
+static int cciss_seq_open(struct inode *inode, struct file *file)
{
- unsigned char cmd[80];
- int len;
-#ifdef CONFIG_CISS_SCSI_TAPE
- ctlr_info_t *h = (ctlr_info_t *) data;
- int rc;
+ int ret = seq_open(file, &cciss_seq_ops);
+ struct seq_file *seq = file->private_data;
+
+ if (!ret)
+ seq->private = PDE(inode)->data;
+
+ return ret;
+}
+
+static ssize_t
+cciss_proc_write(struct file *file, const char __user *buf,
+ size_t length, loff_t *ppos)
+{
+ int err;
+ char *buffer;
+
+#ifndef CONFIG_CISS_SCSI_TAPE
+ return -EINVAL;
#endif
- if (count > sizeof(cmd) - 1)
+ if (!buf || length > PAGE_SIZE - 1)
return -EINVAL;
- if (copy_from_user(cmd, buffer, count))
- return -EFAULT;
- cmd[count] = '\0';
- len = strlen(cmd); // above 3 lines ensure safety
- if (len && cmd[len - 1] == '\n')
- cmd[--len] = '\0';
-# ifdef CONFIG_CISS_SCSI_TAPE
- if (strcmp("engage scsi", cmd) == 0) {
+
+ buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+ err = -EFAULT;
+ if (copy_from_user(buffer, buf, length))
+ goto out;
+ buffer[length] = '\0';
+
+#ifdef CONFIG_CISS_SCSI_TAPE
+ if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
+ struct seq_file *seq = file->private_data;
+ ctlr_info_t *h = seq->private;
+ int rc;
+
rc = cciss_engage_scsi(h->ctlr);
if (rc != 0)
- return -rc;
- return count;
- }
+ err = -rc;
+ else
+ err = length;
+ } else
+#endif /* CONFIG_CISS_SCSI_TAPE */
+ err = -EINVAL;
/* might be nice to have "disengage" too, but it's not
safely possible. (only 1 module use count, lock issues.) */
-# endif
- return -EINVAL;
+
+out:
+ free_page((unsigned long)buffer);
+ return err;
}
-/*
- * Get us a file in /proc/cciss that says something about each controller.
- * Create /proc/cciss if it doesn't exist yet.
- */
+static struct file_operations cciss_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = cciss_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .write = cciss_proc_write,
+};
+
static void __devinit cciss_procinit(int i)
{
struct proc_dir_entry *pde;
- if (proc_cciss == NULL) {
+ if (proc_cciss == NULL)
proc_cciss = proc_mkdir("cciss", proc_root_driver);
- if (!proc_cciss)
- return;
- }
+ if (!proc_cciss)
+ return;
+ pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
+ S_IROTH, proc_cciss,
+ &cciss_proc_fops);
+ if (!pde)
+ return;
- pde = create_proc_read_entry(hba[i]->devname,
- S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH,
- proc_cciss, cciss_proc_get_info, hba[i]);
- pde->write_proc = cciss_proc_write;
+ pde->data = hba[i];
}
#endif /* CONFIG_PROC_FS */
@@ -1341,7 +1401,6 @@ geo_inq:
disk->private_data = &h->drv[drv_index];
/* Set up queue information */
- disk->queue->backing_dev_info.ra_pages = READ_AHEAD;
blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask);
/* This is a hardware imposed limit. */
@@ -3434,7 +3493,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
}
drv->queue = q;
- q->backing_dev_info.ra_pages = READ_AHEAD;
blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
/* This is a hardware imposed limit. */
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 55178e9973a..45ac09300eb 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -1404,21 +1404,18 @@ cciss_engage_scsi(int ctlr)
}
static void
-cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len)
+cciss_seq_tape_report(struct seq_file *seq, int ctlr)
{
unsigned long flags;
- int size;
-
- *pos = *pos -1; *len = *len - 1; // cut off the last trailing newline
CPQ_TAPE_LOCK(ctlr, flags);
- size = sprintf(buffer + *len,
+ seq_printf(seq,
"Sequential access devices: %d\n\n",
ccissscsi[ctlr].ndevices);
CPQ_TAPE_UNLOCK(ctlr, flags);
- *pos += size; *len += size;
}
+
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
* complaining. Doing a host- or bus-reset can't do anything good here.
* Despite what it might say in scsi_error.c, there may well be commands
@@ -1498,6 +1495,5 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
#define cciss_scsi_setup(cntl_num)
#define cciss_unregister_scsi(ctlr)
#define cciss_register_scsi(ctlr)
-#define cciss_proc_tape_report(ctlr, buffer, pos, len)
#endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index db259e60289..12f5baea439 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1152,8 +1152,8 @@ clean_up_and_return:
/* This code is similar to that in open_for_data. The routine is called
whenever an audio play operation is requested.
*/
-int check_for_audio_disc(struct cdrom_device_info * cdi,
- struct cdrom_device_ops * cdo)
+static int check_for_audio_disc(struct cdrom_device_info * cdi,
+ struct cdrom_device_ops * cdo)
{
int ret;
tracktype tracks;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index a703deffb79..27340a7b19d 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -4,7 +4,7 @@
menuconfig DMADEVICES
bool "DMA Engine support"
- depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
+ depends on (PCI && X86) || ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX || PPC
depends on !HIGHMEM64G
help
DMA engines can do asynchronous data transfers without
@@ -37,6 +37,23 @@ config INTEL_IOP_ADMA
help
Enable support for the Intel(R) IOP Series RAID engines.
+config FSL_DMA
+ bool "Freescale MPC85xx/MPC83xx DMA support"
+ depends on PPC
+ select DMA_ENGINE
+ ---help---
+	  Enable support for the Freescale DMA engine. It currently
+	  supports the MPC8560/40, MPC8555, MPC8548 and MPC8641
+	  processors. The MPC8349 and MPC8360 are also supported.
+
+config FSL_DMA_SELFTEST
+ bool "Enable the self test for each DMA channel"
+ depends on FSL_DMA
+ default y
+ ---help---
+	  Enable the self test for each DMA channel. A self test will be
+	  performed after the channel is probed to ensure the DMA engine
+	  works correctly.
+
config DMA_ENGINE
bool
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index b152cd84e12..c8036d94590 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o
ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
+obj-$(CONFIG_FSL_DMA) += fsldma.o
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
new file mode 100644
index 00000000000..cc9a68158d9
--- /dev/null
+++ b/drivers/dma/fsldma.c
@@ -0,0 +1,1067 @@
+/*
+ * Freescale MPC85xx, MPC83xx DMA Engine support
+ *
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author:
+ * Zhang Wei <wei.zhang@freescale.com>, Jul 2007
+ * Ebony Zhu <ebony.zhu@freescale.com>, May 2007
+ *
+ * Description:
+ * DMA engine driver for the Freescale MPC8540 DMA controller, which
+ * also fits the MPC8560, MPC8555, MPC8548, MPC8641, and others.
+ * Support for the MPC8349 DMA controller is also included.
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/of_platform.h>
+
+#include "fsldma.h"
+
+static void dma_init(struct fsl_dma_chan *fsl_chan)
+{
+ /* Reset the channel */
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
+
+ switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
+ case FSL_DMA_IP_85XX:
+		/* Set the channel to the modes below:
+ * EIE - Error interrupt enable
+ * EOSIE - End of segments interrupt enable (basic mode)
+ * EOLNIE - End of links interrupt enable
+ */
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
+ | FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
+ break;
+ case FSL_DMA_IP_83XX:
+		/* Set the channel to the modes below:
+ * EOTIE - End-of-transfer interrupt enable
+ */
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
+ 32);
+ break;
+ }
+
+}
+
+static void set_sr(struct fsl_dma_chan *fsl_chan, dma_addr_t val)
+{
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
+}
+
+static dma_addr_t get_sr(struct fsl_dma_chan *fsl_chan)
+{
+ return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
+}
+
+static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
+ struct fsl_dma_ld_hw *hw, u32 count)
+{
+ hw->count = CPU_TO_DMA(fsl_chan, count, 32);
+}
+
+static void set_desc_src(struct fsl_dma_chan *fsl_chan,
+ struct fsl_dma_ld_hw *hw, dma_addr_t src)
+{
+ u64 snoop_bits;
+
+ snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
+ hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
+}
+
+static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
+ struct fsl_dma_ld_hw *hw, dma_addr_t dest)
+{
+ u64 snoop_bits;
+
+ snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
+ hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
+}
+
+static void set_desc_next(struct fsl_dma_chan *fsl_chan,
+ struct fsl_dma_ld_hw *hw, dma_addr_t next)
+{
+ u64 snoop_bits;
+
+ snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+ ? FSL_DMA_SNEN : 0;
+ hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
+}
+
+static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
+{
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
+}
+
+static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
+{
+ return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
+}
+
+static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
+{
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
+}
+
+static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
+{
+ return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
+}
+
+static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
+{
+ u32 sr = get_sr(fsl_chan);
+ return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
+}
+
+static void dma_start(struct fsl_dma_chan *fsl_chan)
+{
+	u32 mr_set = 0;
+
+ if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
+ mr_set |= FSL_DMA_MR_EMP_EN;
+ } else
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+ DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
+ & ~FSL_DMA_MR_EMP_EN, 32);
+
+ if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
+ mr_set |= FSL_DMA_MR_EMS_EN;
+ else
+ mr_set |= FSL_DMA_MR_CS;
+
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+ DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
+ | mr_set, 32);
+}
+
+static void dma_halt(struct fsl_dma_chan *fsl_chan)
+{
+ int i = 0;
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+ DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
+ 32);
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+ DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
+ | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);
+
+ while (!dma_is_idle(fsl_chan) && (i++ < 100))
+ udelay(10);
+ if (i >= 100 && !dma_is_idle(fsl_chan))
+ dev_err(fsl_chan->dev, "DMA halt timeout!\n");
+}
+
+static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
+ struct fsl_desc_sw *desc)
+{
+ desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
+ DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
+ 64);
+}
+
+static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
+ struct fsl_desc_sw *new_desc)
+{
+ struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
+
+ if (list_empty(&fsl_chan->ld_queue))
+ return;
+
+	/* Link the queue tail to the new descriptor's physical address
+	 * and enable the End-of-segment interrupt on the last link
+	 * descriptor (the previous node's next link descriptor).
+	 *
+	 * For FSL_DMA_IP_83XX, the snoop enable bit needs to be set.
+	 */
+ queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
+ new_desc->async_tx.phys | FSL_DMA_EOSIE |
+ (((fsl_chan->feature & FSL_DMA_IP_MASK)
+ == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
+}
+
+/**
+ * fsl_chan_set_src_loop_size - Set source address hold transfer size
+ * @fsl_chan : Freescale DMA channel
+ * @size : Address loop size, 0 for disable loop
+ *
+ * Set the source address hold (loop) transfer size. While the DMA
+ * transfers data from the source address (SA), it cycles within a
+ * window of this size: with a loop size of 4, the DMA reads from
+ * SA, SA + 1, SA + 2, SA + 3, then loops back to SA, SA + 1 ...
+ * and so on.
+ */
+static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+{
+ switch (size) {
+ case 0:
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+ DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
+ (~FSL_DMA_MR_SAHE), 32);
+ break;
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+ DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
+ FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
+ 32);
+ break;
+ }
+}
+
+/**
+ * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
+ * @fsl_chan : Freescale DMA channel
+ * @size : Address loop size, 0 for disable loop
+ *
+ * Set the destination address hold (loop) transfer size. While the
+ * DMA transfers data to the destination address (TA), it cycles
+ * within a window of this size: with a loop size of 4, the DMA
+ * writes to TA, TA + 1, TA + 2, TA + 3, then loops back to TA,
+ * TA + 1 ... and so on.
+ */
+static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+{
+ switch (size) {
+ case 0:
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+ DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
+ (~FSL_DMA_MR_DAHE), 32);
+ break;
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+ DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
+ FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
+ 32);
+ break;
+ }
+}
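+
+/*
+ * Usage sketch (illustration only): a client holding a channel could
+ * configure a 4-byte address hold window on both ends of a
+ * FIFO-style transfer through the channel ops set up at probe time:
+ *
+ *	fsl_chan->set_src_loop_size(fsl_chan, 4);
+ *	fsl_chan->set_dest_loop_size(fsl_chan, 4);
+ *
+ * A size of 0 disables the loop and addresses increment normally.
+ */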
+
+/**
+ * fsl_chan_toggle_ext_pause - Toggle channel external pause status
+ * @fsl_chan : Freescale DMA channel
+ * @size : Pause control size, 0 for disable external pause control.
+ * The maximum is 1024.
+ *
+ * The Freescale DMA channel can be controlled by the external
+ * signal DREQ#. The pause control size is how many bytes may be
+ * transferred before the channel pauses; a new assertion of DREQ#
+ * then resumes channel operation.
+ */
+static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size)
+{
+ if (size > 1024)
+ return;
+
+ if (size) {
+ DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
+ DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
+ | ((__ilog2(size) << 24) & 0x0f000000),
+ 32);
+ fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
+ } else
+ fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
+}
+
+/**
+ * fsl_chan_toggle_ext_start - Toggle channel external start status
+ * @fsl_chan : Freescale DMA channel
+ * @enable : 0 is disabled, 1 is enabled.
+ *
+ * If external start is enabled, the channel can be started by an
+ * external DMA start pin, so dma_start() does not begin the
+ * transfer immediately. The DMA channel waits until the
+ * control pin is asserted.
+ */
+static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
+{
+ if (enable)
+ fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
+ else
+ fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
+}
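+
+/*
+ * Usage sketch (illustration only): to hand pacing to the external
+ * DREQ# pin on an 85xx-style channel, a client could enable external
+ * start together with a 64-byte pause window before submitting
+ * descriptors:
+ *
+ *	fsl_chan->toggle_ext_start(fsl_chan, 1);
+ *	fsl_chan->toggle_ext_pause(fsl_chan, 64);
+ *
+ * dma_start() then only arms the channel; the hardware waits for
+ * DREQ# before moving data.
+ */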
+
+static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
+ struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ /* cookie increment and adding to ld_queue must be atomic */
+ spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
+ cookie = fsl_chan->common.cookie;
+ cookie++;
+ if (cookie < 0)
+ cookie = 1;
+ desc->async_tx.cookie = cookie;
+ fsl_chan->common.cookie = desc->async_tx.cookie;
+
+ append_ld_queue(fsl_chan, desc);
+ list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev);
+
+ spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+
+ return cookie;
+}
+
+/**
+ * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
+ * @fsl_chan : Freescale DMA channel
+ *
+ * Return - The allocated descriptor, or NULL on failure.
+ */
+static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
+ struct fsl_dma_chan *fsl_chan)
+{
+ dma_addr_t pdesc;
+ struct fsl_desc_sw *desc_sw;
+
+ desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
+ if (desc_sw) {
+ memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
+ dma_async_tx_descriptor_init(&desc_sw->async_tx,
+ &fsl_chan->common);
+ desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
+ INIT_LIST_HEAD(&desc_sw->async_tx.tx_list);
+ desc_sw->async_tx.phys = pdesc;
+ }
+
+ return desc_sw;
+}
+
+
+/**
+ * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
+ * @fsl_chan : Freescale DMA channel
+ *
+ * This function will create a dma pool for descriptor allocation.
+ *
+ * Return - The number of descriptors allocated.
+ */
+static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+ LIST_HEAD(tmp_list);
+
+	/* The descriptor must be aligned to 32 bytes to meet the
+	 * FSL DMA specification requirement.
+	 */
+ fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
+ fsl_chan->dev, sizeof(struct fsl_desc_sw),
+ 32, 0);
+ if (!fsl_chan->desc_pool) {
+ dev_err(fsl_chan->dev, "No memory for channel %d "
+ "descriptor dma pool.\n", fsl_chan->id);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * fsl_dma_free_chan_resources - Free all resources of the channel.
+ * @fsl_chan : Freescale DMA channel
+ */
+static void fsl_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+ struct fsl_desc_sw *desc, *_desc;
+ unsigned long flags;
+
+ dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
+ spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
+#ifdef FSL_DMA_LD_DEBUG
+ dev_dbg(fsl_chan->dev,
+ "LD %p will be released.\n", desc);
+#endif
+ list_del(&desc->node);
+ /* free link descriptor */
+ dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+ }
+ spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+ dma_pool_destroy(fsl_chan->desc_pool);
+}
+
+static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
+ struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+ size_t len, unsigned long flags)
+{
+ struct fsl_dma_chan *fsl_chan;
+ struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
+ size_t copy;
+ LIST_HEAD(link_chain);
+
+ if (!chan)
+ return NULL;
+
+ if (!len)
+ return NULL;
+
+ fsl_chan = to_fsl_chan(chan);
+
+ do {
+
+ /* Allocate the link descriptor from DMA pool */
+ new = fsl_dma_alloc_descriptor(fsl_chan);
+ if (!new) {
+ dev_err(fsl_chan->dev,
+ "No free memory for link descriptor\n");
+ return NULL;
+ }
+#ifdef FSL_DMA_LD_DEBUG
+ dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+#endif
+
+ copy = min(len, FSL_DMA_BCR_MAX_CNT);
+
+ set_desc_cnt(fsl_chan, &new->hw, copy);
+ set_desc_src(fsl_chan, &new->hw, dma_src);
+ set_desc_dest(fsl_chan, &new->hw, dma_dest);
+
+ if (!first)
+ first = new;
+ else
+ set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);
+
+ new->async_tx.cookie = 0;
+ new->async_tx.ack = 1;
+
+ prev = new;
+ len -= copy;
+ dma_src += copy;
+ dma_dest += copy;
+
+ /* Insert the link descriptor to the LD ring */
+ list_add_tail(&new->node, &first->async_tx.tx_list);
+ } while (len);
+
+ new->async_tx.ack = 0; /* client is in control of this ack */
+ new->async_tx.cookie = -EBUSY;
+
+	/* Set End-of-link on the last link descriptor of the new list */
+ set_ld_eol(fsl_chan, new);
+
+ return first ? &first->async_tx : NULL;
+}
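+
+/*
+ * Usage sketch (illustration only): a dmaengine client drives this
+ * driver through the generic async_tx entry points registered in
+ * of_fsl_dma_probe() below. Assuming dma_src, dma_dest and len come
+ * from dma_map_single(), the flow is roughly:
+ *
+ *	struct dma_async_tx_descriptor *tx;
+ *	dma_cookie_t cookie;
+ *
+ *	tx = chan->device->device_prep_dma_memcpy(chan, dma_dest,
+ *			dma_src, len, 0);
+ *	cookie = tx->tx_submit(tx);
+ *	chan->device->device_issue_pending(chan);
+ *	while (chan->device->device_is_tx_complete(chan, cookie,
+ *			NULL, NULL) != DMA_SUCCESS)
+ *		cpu_relax();
+ *
+ * fsl_dma_self_test() below exercises this same path with the
+ * driver's own functions.
+ */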
+
+/**
+ * fsl_dma_update_completed_cookie - Update the completed cookie.
+ * @fsl_chan : Freescale DMA channel
+ */
+static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
+{
+ struct fsl_desc_sw *cur_desc, *desc;
+ dma_addr_t ld_phy;
+
+ ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;
+
+ if (ld_phy) {
+ cur_desc = NULL;
+ list_for_each_entry(desc, &fsl_chan->ld_queue, node)
+ if (desc->async_tx.phys == ld_phy) {
+ cur_desc = desc;
+ break;
+ }
+
+ if (cur_desc && cur_desc->async_tx.cookie) {
+ if (dma_is_idle(fsl_chan))
+ fsl_chan->completed_cookie =
+ cur_desc->async_tx.cookie;
+ else
+ fsl_chan->completed_cookie =
+ cur_desc->async_tx.cookie - 1;
+ }
+ }
+}
+
+/**
+ * fsl_chan_ld_cleanup - Clean up link descriptors
+ * @fsl_chan : Freescale DMA channel
+ *
+ * This function cleans up the ld_queue of the DMA channel: it frees
+ * the completed link descriptors and runs their completion
+ * callbacks.
+ */
+static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
+{
+ struct fsl_desc_sw *desc, *_desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
+ fsl_dma_update_completed_cookie(fsl_chan);
+ dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
+ fsl_chan->completed_cookie);
+ list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
+ dma_async_tx_callback callback;
+ void *callback_param;
+
+ if (dma_async_is_complete(desc->async_tx.cookie,
+ fsl_chan->completed_cookie, fsl_chan->common.cookie)
+ == DMA_IN_PROGRESS)
+ break;
+
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
+
+ /* Remove from ld_queue list */
+ list_del(&desc->node);
+
+		dev_dbg(fsl_chan->dev, "link descriptor %p will be recycled.\n",
+				desc);
+ dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+
+		/* Run the link descriptor callback function; the lock is
+		 * dropped across the call so the callback may resubmit. */
+ if (callback) {
+ spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+ dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
+ desc);
+ callback(callback_param);
+ spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ }
+ }
+ spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+}
+
+/**
+ * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
+ * @fsl_chan : Freescale DMA channel
+ */
+static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
+{
+ struct list_head *ld_node;
+ dma_addr_t next_dest_addr;
+ unsigned long flags;
+
+ if (!dma_is_idle(fsl_chan))
+ return;
+
+ dma_halt(fsl_chan);
+
+	/* If there are link descriptors in the queue that have not
+	 * been transferred yet, we need to start the transfer.
+	 */
+ spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+
+	/* Find the first untransferred descriptor */
+ for (ld_node = fsl_chan->ld_queue.next;
+ (ld_node != &fsl_chan->ld_queue)
+ && (dma_async_is_complete(
+ to_fsl_desc(ld_node)->async_tx.cookie,
+ fsl_chan->completed_cookie,
+ fsl_chan->common.cookie) == DMA_SUCCESS);
+ ld_node = ld_node->next);
+
+ spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+
+ if (ld_node != &fsl_chan->ld_queue) {
+ /* Get the ld start address from ld_queue */
+ next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
+		dev_dbg(fsl_chan->dev, "xfer LDs starting from 0x%016llx\n",
+ (u64)next_dest_addr);
+ set_cdar(fsl_chan, next_dest_addr);
+ dma_start(fsl_chan);
+ } else {
+ set_cdar(fsl_chan, 0);
+ set_ndar(fsl_chan, 0);
+ }
+}
+
+/**
+ * fsl_dma_memcpy_issue_pending - Issue the DMA start command
+ * @fsl_chan : Freescale DMA channel
+ */
+static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
+{
+ struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+
+#ifdef FSL_DMA_LD_DEBUG
+ struct fsl_desc_sw *ld;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ if (list_empty(&fsl_chan->ld_queue)) {
+ spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+ return;
+ }
+
+ dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
+ list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
+ int i;
+ dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
+ fsl_chan->id, ld->async_tx.phys);
+ for (i = 0; i < 8; i++)
+ dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
+ i, *(((u32 *)&ld->hw) + i));
+ }
+ dev_dbg(fsl_chan->dev, "----------------\n");
+ spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+#endif
+
+ fsl_chan_xfer_ld_queue(fsl_chan);
+}
+
+static void fsl_dma_dependency_added(struct dma_chan *chan)
+{
+ struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+
+ fsl_chan_ld_cleanup(fsl_chan);
+}
+
+/**
+ * fsl_dma_is_complete - Determine the DMA status
+ * @fsl_chan : Freescale DMA channel
+ */
+static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ dma_cookie_t *done,
+ dma_cookie_t *used)
+{
+ struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+
+ fsl_chan_ld_cleanup(fsl_chan);
+
+ last_used = chan->cookie;
+ last_complete = fsl_chan->completed_cookie;
+
+ if (done)
+ *done = last_complete;
+
+ if (used)
+ *used = last_used;
+
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
+{
+ struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
+ dma_addr_t stat;
+
+ stat = get_sr(fsl_chan);
+ dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
+ fsl_chan->id, stat);
+ set_sr(fsl_chan, stat); /* Clear the event register */
+
+ stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
+ if (!stat)
+ return IRQ_NONE;
+
+ if (stat & FSL_DMA_SR_TE)
+ dev_err(fsl_chan->dev, "Transfer Error!\n");
+
+ /* If the link descriptor segment transfer finishes,
+ * we will recycle the used descriptor.
+ */
+ if (stat & FSL_DMA_SR_EOSI) {
+ dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
+ dev_dbg(fsl_chan->dev, "event: clndar 0x%016llx, "
+ "nlndar 0x%016llx\n", (u64)get_cdar(fsl_chan),
+ (u64)get_ndar(fsl_chan));
+ stat &= ~FSL_DMA_SR_EOSI;
+ }
+
+	/* If the current transfer is the end-of-transfer,
+	 * we should clear the Channel Start bit to
+	 * prepare for the next transfer.
+	 */
+ if (stat & (FSL_DMA_SR_EOLNI | FSL_DMA_SR_EOCDI)) {
+ dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
+ stat &= ~FSL_DMA_SR_EOLNI;
+ fsl_chan_xfer_ld_queue(fsl_chan);
+ }
+
+ if (stat)
+ dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
+ stat);
+
+ dev_dbg(fsl_chan->dev, "event: Exit\n");
+ tasklet_schedule(&fsl_chan->tasklet);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
+{
+ struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
+ u32 gsr;
+ int ch_nr;
+
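+	/* The DGSR packs one 8-bit status field per channel, with
+	 * channel 0 in the most significant byte, so the position of
+	 * the pending status bit selects the interrupting channel
+	 * (a sketch of the layout, inferred from the ch_nr arithmetic
+	 * below).
+	 */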
+ gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
+ : in_le32(fdev->reg_base);
+ ch_nr = (32 - ffs(gsr)) / 8;
+
+ return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
+ fdev->chan[ch_nr]) : IRQ_NONE;
+}
+
+static void dma_do_tasklet(unsigned long data)
+{
+ struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
+ fsl_chan_ld_cleanup(fsl_chan);
+}
+
+static void fsl_dma_callback_test(struct fsl_dma_chan *fsl_chan)
+{
+ if (fsl_chan)
+ dev_info(fsl_chan->dev, "selftest: callback is ok!\n");
+}
+
+static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
+{
+ struct dma_chan *chan;
+ int err = 0;
+ dma_addr_t dma_dest, dma_src;
+ dma_cookie_t cookie;
+ u8 *src, *dest;
+ int i;
+ size_t test_size;
+ struct dma_async_tx_descriptor *tx1, *tx2, *tx3;
+
+ test_size = 4096;
+
+ src = kmalloc(test_size * 2, GFP_KERNEL);
+ if (!src) {
+ dev_err(fsl_chan->dev,
+ "selftest: Cannot alloc memory for test!\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dest = src + test_size;
+
+ for (i = 0; i < test_size; i++)
+ src[i] = (u8) i;
+
+ chan = &fsl_chan->common;
+
+ if (fsl_dma_alloc_chan_resources(chan) < 1) {
+ dev_err(fsl_chan->dev,
+ "selftest: Cannot alloc resources for DMA\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* TX 1 */
+ dma_src = dma_map_single(fsl_chan->dev, src, test_size / 2,
+ DMA_TO_DEVICE);
+ dma_dest = dma_map_single(fsl_chan->dev, dest, test_size / 2,
+ DMA_FROM_DEVICE);
+ tx1 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 2, 0);
+ async_tx_ack(tx1);
+
+ cookie = fsl_dma_tx_submit(tx1);
+ fsl_dma_memcpy_issue_pending(chan);
+ msleep(2);
+
+ if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ dev_err(fsl_chan->dev, "selftest: Time out!\n");
+ err = -ENODEV;
+ goto out;
+ }
+
+ /* Test free and re-alloc channel resources */
+ fsl_dma_free_chan_resources(chan);
+
+ if (fsl_dma_alloc_chan_resources(chan) < 1) {
+ dev_err(fsl_chan->dev,
+ "selftest: Cannot alloc resources for DMA\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ /* Continue to test
+ * TX 2
+ */
+ dma_src = dma_map_single(fsl_chan->dev, src + test_size / 2,
+ test_size / 4, DMA_TO_DEVICE);
+ dma_dest = dma_map_single(fsl_chan->dev, dest + test_size / 2,
+ test_size / 4, DMA_FROM_DEVICE);
+ tx2 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
+ async_tx_ack(tx2);
+
+ /* TX 3 */
+ dma_src = dma_map_single(fsl_chan->dev, src + test_size * 3 / 4,
+ test_size / 4, DMA_TO_DEVICE);
+ dma_dest = dma_map_single(fsl_chan->dev, dest + test_size * 3 / 4,
+ test_size / 4, DMA_FROM_DEVICE);
+ tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
+ async_tx_ack(tx3);
+
+	/* Test submitting the prepared descriptors out of order */
+ cookie = fsl_dma_tx_submit(tx3);
+ cookie = fsl_dma_tx_submit(tx2);
+
+#ifdef FSL_DMA_CALLBACKTEST
+ if (dma_has_cap(DMA_INTERRUPT, ((struct fsl_dma_device *)
+ dev_get_drvdata(fsl_chan->dev->parent))->common.cap_mask)) {
+ tx3->callback = fsl_dma_callback_test;
+ tx3->callback_param = fsl_chan;
+ }
+#endif
+ fsl_dma_memcpy_issue_pending(chan);
+ msleep(2);
+
+ if (fsl_dma_is_complete(chan, cookie, NULL, NULL) != DMA_SUCCESS) {
+ dev_err(fsl_chan->dev, "selftest: Time out!\n");
+ err = -ENODEV;
+ goto free_resources;
+ }
+
+ err = memcmp(src, dest, test_size);
+ if (err) {
+		for (i = 0; (i < test_size) && (*(src + i) == *(dest + i));
+				i++);
+		dev_err(fsl_chan->dev, "selftest: Test failed, data %d/%d is "
+				"wrong! src 0x%x, dest 0x%x\n",
+ i, test_size, *(src + i), *(dest + i));
+ }
+
+free_resources:
+ fsl_dma_free_chan_resources(chan);
+out:
+ kfree(src);
+ return err;
+}
+
+static int __devinit of_fsl_dma_chan_probe(struct of_device *dev,
+ const struct of_device_id *match)
+{
+ struct fsl_dma_device *fdev;
+ struct fsl_dma_chan *new_fsl_chan;
+ int err;
+
+ fdev = dev_get_drvdata(dev->dev.parent);
+ BUG_ON(!fdev);
+
+ /* alloc channel */
+ new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
+ if (!new_fsl_chan) {
+ dev_err(&dev->dev, "No free memory for allocating "
+ "dma channels!\n");
+ err = -ENOMEM;
+ goto err;
+ }
+
+ /* get dma channel register base */
+ err = of_address_to_resource(dev->node, 0, &new_fsl_chan->reg);
+ if (err) {
+ dev_err(&dev->dev, "Can't get %s property 'reg'\n",
+ dev->node->full_name);
+ goto err;
+ }
+
+ new_fsl_chan->feature = *(u32 *)match->data;
+
+ if (!fdev->feature)
+ fdev->feature = new_fsl_chan->feature;
+
+	/* If the DMA device's feature set is different from its
+	 * channels', report the bug.
+	 */
+ WARN_ON(fdev->feature != new_fsl_chan->feature);
+
+ new_fsl_chan->dev = &dev->dev;
+ new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
+ new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
+
+ new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
+ if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
+		dev_err(&dev->dev, "There is no channel %d!\n",
+ new_fsl_chan->id);
+ err = -EINVAL;
+ goto err;
+ }
+ fdev->chan[new_fsl_chan->id] = new_fsl_chan;
+ tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
+ (unsigned long)new_fsl_chan);
+
+ /* Init the channel */
+ dma_init(new_fsl_chan);
+
+ /* Clear cdar registers */
+ set_cdar(new_fsl_chan, 0);
+
+ switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
+ case FSL_DMA_IP_85XX:
+ new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
+ new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
+ case FSL_DMA_IP_83XX:
+ new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
+ new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
+ }
+
+ spin_lock_init(&new_fsl_chan->desc_lock);
+ INIT_LIST_HEAD(&new_fsl_chan->ld_queue);
+
+ new_fsl_chan->common.device = &fdev->common;
+
+ /* Add the channel to DMA device channel list */
+ list_add_tail(&new_fsl_chan->common.device_node,
+ &fdev->common.channels);
+ fdev->common.chancnt++;
+
+ new_fsl_chan->irq = irq_of_parse_and_map(dev->node, 0);
+ if (new_fsl_chan->irq != NO_IRQ) {
+ err = request_irq(new_fsl_chan->irq,
+ &fsl_dma_chan_do_interrupt, IRQF_SHARED,
+ "fsldma-channel", new_fsl_chan);
+ if (err) {
+ dev_err(&dev->dev, "DMA channel %s request_irq error "
+ "with return %d\n", dev->node->full_name, err);
+ goto err;
+ }
+ }
+
+#ifdef CONFIG_FSL_DMA_SELFTEST
+ err = fsl_dma_self_test(new_fsl_chan);
+ if (err)
+ goto err;
+#endif
+
+ dev_info(&dev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
+ match->compatible, new_fsl_chan->irq);
+
+ return 0;
+err:
+ dma_halt(new_fsl_chan);
+ iounmap(new_fsl_chan->reg_base);
+ free_irq(new_fsl_chan->irq, new_fsl_chan);
+ list_del(&new_fsl_chan->common.device_node);
+ kfree(new_fsl_chan);
+ return err;
+}
+
+const u32 mpc8540_dma_ip_feature = FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN;
+const u32 mpc8349_dma_ip_feature = FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN;
+
+static struct of_device_id of_fsl_dma_chan_ids[] = {
+ {
+ .compatible = "fsl,mpc8540-dma-channel",
+ .data = (void *)&mpc8540_dma_ip_feature,
+ },
+ {
+ .compatible = "fsl,mpc8349-dma-channel",
+ .data = (void *)&mpc8349_dma_ip_feature,
+ },
+ {}
+};
+
+static struct of_platform_driver of_fsl_dma_chan_driver = {
+ .name = "of-fsl-dma-channel",
+ .match_table = of_fsl_dma_chan_ids,
+ .probe = of_fsl_dma_chan_probe,
+};
+
+static __init int of_fsl_dma_chan_init(void)
+{
+ return of_register_platform_driver(&of_fsl_dma_chan_driver);
+}
+
+static int __devinit of_fsl_dma_probe(struct of_device *dev,
+ const struct of_device_id *match)
+{
+ int err;
+ unsigned int irq;
+ struct fsl_dma_device *fdev;
+
+ fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
+ if (!fdev) {
+		dev_err(&dev->dev, "Not enough memory for 'priv'\n");
+ err = -ENOMEM;
+ goto err;
+ }
+ fdev->dev = &dev->dev;
+ INIT_LIST_HEAD(&fdev->common.channels);
+
+ /* get DMA controller register base */
+ err = of_address_to_resource(dev->node, 0, &fdev->reg);
+ if (err) {
+ dev_err(&dev->dev, "Can't get %s property 'reg'\n",
+ dev->node->full_name);
+ goto err;
+ }
+
+ dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
+ "controller at 0x%08x...\n",
+ match->compatible, fdev->reg.start);
+ fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
+ - fdev->reg.start + 1);
+
+ dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
+ dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
+ fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
+ fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
+ fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
+ fdev->common.device_is_tx_complete = fsl_dma_is_complete;
+ fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
+ fdev->common.device_dependency_added = fsl_dma_dependency_added;
+ fdev->common.dev = &dev->dev;
+
+ irq = irq_of_parse_and_map(dev->node, 0);
+ if (irq != NO_IRQ) {
+ err = request_irq(irq, &fsl_dma_do_interrupt, IRQF_SHARED,
+ "fsldma-device", fdev);
+ if (err) {
+ dev_err(&dev->dev, "DMA device request_irq error "
+ "with return %d\n", err);
+ goto err;
+ }
+ }
+
+ dev_set_drvdata(&(dev->dev), fdev);
+ of_platform_bus_probe(dev->node, of_fsl_dma_chan_ids, &dev->dev);
+
+ dma_async_device_register(&fdev->common);
+ return 0;
+
+err:
+ iounmap(fdev->reg_base);
+ kfree(fdev);
+ return err;
+}
+
+static struct of_device_id of_fsl_dma_ids[] = {
+ { .compatible = "fsl,mpc8540-dma", },
+ { .compatible = "fsl,mpc8349-dma", },
+ {}
+};
+
+static struct of_platform_driver of_fsl_dma_driver = {
+ .name = "of-fsl-dma",
+ .match_table = of_fsl_dma_ids,
+ .probe = of_fsl_dma_probe,
+};
+
+static __init int of_fsl_dma_init(void)
+{
+ return of_register_platform_driver(&of_fsl_dma_driver);
+}
+
+subsys_initcall(of_fsl_dma_chan_init);
+subsys_initcall(of_fsl_dma_init);
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
new file mode 100644
index 00000000000..ba78c42121b
--- /dev/null
+++ b/drivers/dma/fsldma.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
+ *
+ * Author:
+ * Zhang Wei <wei.zhang@freescale.com>, Jul 2007
+ * Ebony Zhu <ebony.zhu@freescale.com>, May 2007
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#ifndef __DMA_FSLDMA_H
+#define __DMA_FSLDMA_H
+
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/dmaengine.h>
+
+/* Define data structures needed by the Freescale
+ * MPC8540 and MPC8349 DMA controllers.
+ */
+#define FSL_DMA_MR_CS 0x00000001
+#define FSL_DMA_MR_CC 0x00000002
+#define FSL_DMA_MR_CA 0x00000008
+#define FSL_DMA_MR_EIE 0x00000040
+#define FSL_DMA_MR_XFE 0x00000020
+#define FSL_DMA_MR_EOLNIE 0x00000100
+#define FSL_DMA_MR_EOLSIE 0x00000080
+#define FSL_DMA_MR_EOSIE 0x00000200
+#define FSL_DMA_MR_CDSM 0x00000010
+#define FSL_DMA_MR_CTM 0x00000004
+#define FSL_DMA_MR_EMP_EN 0x00200000
+#define FSL_DMA_MR_EMS_EN 0x00040000
+#define FSL_DMA_MR_DAHE 0x00002000
+#define FSL_DMA_MR_SAHE 0x00001000
+
+/* Special MR definition for MPC8349 */
+#define FSL_DMA_MR_EOTIE 0x00000080
+
+#define FSL_DMA_SR_CH 0x00000020
+#define FSL_DMA_SR_CB 0x00000004
+#define FSL_DMA_SR_TE 0x00000080
+#define FSL_DMA_SR_EOSI 0x00000002
+#define FSL_DMA_SR_EOLSI 0x00000001
+#define FSL_DMA_SR_EOCDI 0x00000001
+#define FSL_DMA_SR_EOLNI 0x00000008
+
+#define FSL_DMA_SATR_SBPATMU 0x20000000
+#define FSL_DMA_SATR_STRANSINT_RIO 0x00c00000
+#define FSL_DMA_SATR_SREADTYPE_SNOOP_READ 0x00050000
+#define FSL_DMA_SATR_SREADTYPE_BP_IORH 0x00020000
+#define FSL_DMA_SATR_SREADTYPE_BP_NREAD 0x00040000
+#define FSL_DMA_SATR_SREADTYPE_BP_MREAD 0x00070000
+
+#define FSL_DMA_DATR_DBPATMU 0x20000000
+#define FSL_DMA_DATR_DTRANSINT_RIO 0x00c00000
+#define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE 0x00050000
+#define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH 0x00010000
+
+#define FSL_DMA_EOL ((u64)0x1)
+#define FSL_DMA_SNEN ((u64)0x10)
+#define FSL_DMA_EOSIE 0x8
+#define FSL_DMA_NLDA_MASK (~(u64)0x1f)
+
+#define FSL_DMA_BCR_MAX_CNT 0x03ffffffu
+
+#define FSL_DMA_DGSR_TE 0x80
+#define FSL_DMA_DGSR_CH 0x20
+#define FSL_DMA_DGSR_PE 0x10
+#define FSL_DMA_DGSR_EOLNI 0x08
+#define FSL_DMA_DGSR_CB 0x04
+#define FSL_DMA_DGSR_EOSI 0x02
+#define FSL_DMA_DGSR_EOLSI 0x01
+
+struct fsl_dma_ld_hw {
+ u64 __bitwise src_addr;
+ u64 __bitwise dst_addr;
+ u64 __bitwise next_ln_addr;
+ u32 __bitwise count;
+ u32 __bitwise reserve;
+} __attribute__((aligned(32)));
+
+struct fsl_desc_sw {
+ struct fsl_dma_ld_hw hw;
+ struct list_head node;
+ struct dma_async_tx_descriptor async_tx;
+ struct list_head *ld;
+ void *priv;
+} __attribute__((aligned(32)));
+
+struct fsl_dma_chan_regs {
+ u32 __bitwise mr; /* 0x00 - Mode Register */
+ u32 __bitwise sr; /* 0x04 - Status Register */
+ u64 __bitwise cdar; /* 0x08 - Current descriptor address register */
+ u64 __bitwise sar; /* 0x10 - Source Address Register */
+ u64 __bitwise dar; /* 0x18 - Destination Address Register */
+ u32 __bitwise bcr; /* 0x20 - Byte Count Register */
+ u64 __bitwise ndar; /* 0x24 - Next Descriptor Address Register */
+};
+
+struct fsl_dma_chan;
+#define FSL_DMA_MAX_CHANS_PER_DEVICE 4
+
+struct fsl_dma_device {
+ void __iomem *reg_base; /* DGSR register base */
+ struct resource reg; /* Resource for register */
+ struct device *dev;
+ struct dma_device common;
+ struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
+ u32 feature; /* The same as DMA channels */
+};
+
+/* Define macros for fsl_dma_chan->feature property */
+#define FSL_DMA_LITTLE_ENDIAN 0x00000000
+#define FSL_DMA_BIG_ENDIAN 0x00000001
+
+#define FSL_DMA_IP_MASK 0x00000ff0
+#define FSL_DMA_IP_85XX 0x00000010
+#define FSL_DMA_IP_83XX 0x00000020
+
+#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
+#define FSL_DMA_CHAN_START_EXT 0x00002000
+
+struct fsl_dma_chan {
+ struct fsl_dma_chan_regs __iomem *reg_base;
+ dma_cookie_t completed_cookie; /* The maximum cookie completed */
+ spinlock_t desc_lock; /* Descriptor operation lock */
+ struct list_head ld_queue; /* Link descriptors queue */
+ struct dma_chan common; /* DMA common channel */
+ struct dma_pool *desc_pool; /* Descriptors pool */
+ struct device *dev; /* Channel device */
+ struct resource reg; /* Resource for register */
+ int irq; /* Channel IRQ */
+ int id; /* Raw id of this channel */
+ struct tasklet_struct tasklet;
+ u32 feature;
+
+ void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size);
+ void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
+ void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
+ void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
+};
+
+#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
+#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
+#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
+
+#ifndef __powerpc64__
+static u64 in_be64(const u64 __iomem *addr)
+{
+ return ((u64)in_be32((u32 *)addr) << 32) | (in_be32((u32 *)addr + 1));
+}
+
+static void out_be64(u64 __iomem *addr, u64 val)
+{
+ out_be32((u32 *)addr, val >> 32);
+ out_be32((u32 *)addr + 1, (u32)val);
+}
+
+/* There are no asm instructions for 64-bit byte-reversed loads and stores */
+static u64 in_le64(const u64 __iomem *addr)
+{
+ return ((u64)in_le32((u32 *)addr + 1) << 32) | (in_le32((u32 *)addr));
+}
+
+static void out_le64(u64 __iomem *addr, u64 val)
+{
+ out_le32((u32 *)addr + 1, val >> 32);
+ out_le32((u32 *)addr, (u32)val);
+}
+#endif
+
+#define DMA_IN(fsl_chan, addr, width) \
+ (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
+ in_be##width(addr) : in_le##width(addr))
+#define DMA_OUT(fsl_chan, addr, val, width) \
+ (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
+ out_be##width(addr, val) : out_le##width(addr, val))
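+
+/* For example, DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, v, 32)
+ * expands to out_be32(&fsl_chan->reg_base->mr, v) on big-endian
+ * (85xx-style) controllers and to out_le32(...) on little-endian
+ * (83xx-style) ones. The width token is pasted into the accessor
+ * name, which is why the 64-bit helpers above are provided for
+ * 32-bit builds.
+ */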
+
+#define DMA_TO_CPU(fsl_chan, d, width) \
+ (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
+ be##width##_to_cpu(d) : le##width##_to_cpu(d))
+#define CPU_TO_DMA(fsl_chan, c, width) \
+ (((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ? \
+ cpu_to_be##width(c) : cpu_to_le##width(c))
+
+#endif /* __DMA_FSLDMA_H */
diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
index dff38accc5c..4017d9e7acd 100644
--- a/drivers/dma/ioat_dma.c
+++ b/drivers/dma/ioat_dma.c
@@ -714,6 +714,7 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy(
new->len = len;
new->dst = dma_dest;
new->src = dma_src;
+ new->async_tx.ack = 0;
return &new->async_tx;
} else
return NULL;
@@ -741,6 +742,7 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy(
new->len = len;
new->dst = dma_dest;
new->src = dma_src;
+ new->async_tx.ack = 0;
return &new->async_tx;
} else
return NULL;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 0fbf1bbbaee..d7a3ea88edd 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1253,7 +1253,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
/* Setup interrupt handlers. */
for (idp = id; idp->name; idp++) {
- if (request_irq(idp->irq, idp->handler, 0, idp->name, dev) != 0)
+ if (request_irq(idp->irq, idp->handler, IRQF_DISABLED, idp->name, dev) != 0)
printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, idp->irq);
}
@@ -1382,7 +1382,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
/* Setup interrupt handlers. */
for (idp = id; idp->name; idp++) {
- if (request_irq(b+idp->irq, fec_enet_interrupt, 0, idp->name, dev) != 0)
+ if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name, dev) != 0)
printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq);
}
@@ -1553,7 +1553,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
/* Setup interrupt handlers. */
for (idp = id; idp->name; idp++) {
- if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0)
+ if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0)
printk("FEC: Could not allocate %s IRQ(%d)!\n", idp->name, b+idp->irq);
}
@@ -1680,7 +1680,7 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
/* Setup interrupt handlers. */
for (idp = id; idp->name; idp++) {
- if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0)
+ if (request_irq(b+idp->irq, fec_enet_interrupt, IRQF_DISABLED, idp->name,dev) != 0)
printk("FEC: Could not allocate %s IRQ(%d)!\n",
idp->name, b+idp->irq);
}
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
index 9692d6a205e..07c7f31081b 100644
--- a/drivers/s390/char/defkeymap.c
+++ b/drivers/s390/char/defkeymap.c
@@ -152,7 +152,7 @@ char *func_table[MAX_NR_FUNC] = {
struct kbdiacruc accent_table[MAX_DIACR] = {
{'^', 'c', 0003}, {'^', 'd', 0004},
- {'^', 'z', 0032}, {'^', 0012', 0000},
+ {'^', 'z', 0032}, {'^', 0012, 0000},
};
unsigned int accent_table_size = 4;
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index edd248367b3..dbd91461853 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -6,7 +6,9 @@ and sync so that events like out of disk space get reported properly on
cached files. Fix setxattr failure to certain Samba versions. Fix mount
of second share to disconnected server session (autoreconnect on this).
Add ability to modify cifs acls for handling chmod (when mounted with
-cifsacl flag).
+cifsacl flag). Fix prefixpath path separator so we can handle mounts
+with prefixpaths longer than one directory (one path component) when
+mounted to Windows servers.
Version 1.51
------------
diff --git a/fs/cifs/README b/fs/cifs/README
index c623e2f9c5d..50306229b0f 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -461,7 +461,7 @@ A partial list of the supported mount options follows:
cifsacl Report mode bits (e.g. on stat) based on the Windows ACL for
the file. (EXPERIMENTAL)
servern Specify the server 's netbios name (RFC1001 name) to use
- when attempting to setup a session to the server. This is
+ when attempting to setup a session to the server.
This is needed for mounting to some older servers (such
as OS/2 or Windows 98 and Windows ME) since they do not
support a default server name. A server name can be up
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 73c4c419663..0228ed06069 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -98,8 +98,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
if (mid_entry->resp_buf) {
cifs_dump_detail(mid_entry->resp_buf);
cifs_dump_mem("existing buf: ",
- mid_entry->resp_buf,
- 62 /* fixme */);
+ mid_entry->resp_buf, 62);
}
}
}
@@ -439,7 +438,7 @@ cifs_stats_read(char *buf, char **beginBuffer, off_t offset,
return length;
}
-#endif
+#endif /* STATS */
static struct proc_dir_entry *proc_fs_cifs;
read_proc_t cifs_txanchor_read;
@@ -482,7 +481,7 @@ cifs_proc_init(void)
cifs_stats_read, NULL);
if (pde)
pde->write_proc = cifs_stats_write;
-#endif
+#endif /* STATS */
pde = create_proc_read_entry("cifsFYI", 0, proc_fs_cifs,
cifsFYI_read, NULL);
if (pde)
@@ -918,4 +917,12 @@ security_flags_write(struct file *file, const char __user *buffer,
/* BB should we turn on MAY flags for other MUST options? */
return count;
}
-#endif
+#else
+inline void cifs_proc_init(void)
+{
+}
+
+inline void cifs_proc_clean(void)
+{
+}
+#endif /* PROC_FS */
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index c26cd0d2c6d..5eb3b83bbfa 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -25,8 +25,11 @@
void cifs_dump_mem(char *label, void *data, int length);
#ifdef CONFIG_CIFS_DEBUG2
+#define DBG2 2
void cifs_dump_detail(struct smb_hdr *);
void cifs_dump_mids(struct TCP_Server_Info *);
+#else
+#define DBG2 0
#endif
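+
+/* Usage sketch: callers pass DBG2 as the first argument, e.g.
+ * cFYI(DBG2, ("all perms")), so the message is printed only when
+ * CONFIG_CIFS_DEBUG2 is enabled; with DBG2 defined as 0 the cFYI
+ * gate discards it.
+ */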
extern int traceSMB; /* flag which enables the function below */
void dump_smb(struct smb_hdr *, int);
@@ -64,10 +67,10 @@ extern int cifsERROR;
* ---------
*/
#else /* _CIFS_DEBUG */
-#define cERROR(button,prspec)
-#define cEVENT(format,arg...)
+#define cERROR(button, prspec)
+#define cEVENT(format, arg...)
#define cFYI(button, prspec)
-#define cifserror(format,arg...)
+#define cifserror(format, arg...)
#endif /* _CIFS_DEBUG */
#endif /* _H_CIFS_DEBUG */
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 6ad44752996..7f883825341 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -286,7 +286,7 @@ static void dump_referral(const struct dfs_info3_param *ref)
cFYI(1, ("DFS: node path: %s", ref->node_name));
cFYI(1, ("DFS: fl: %hd, srv_type: %hd", ref->flags, ref->server_type));
cFYI(1, ("DFS: ref_flags: %hd, path_consumed: %hd", ref->ref_flag,
- ref->PathConsumed));
+ ref->path_consumed));
}
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index d543accc10d..6653e29637a 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -125,7 +125,7 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
#ifdef CONFIG_CIFS_DEBUG2
if (cifsFYI && !IS_ERR(spnego_key)) {
struct cifs_spnego_msg *msg = spnego_key->payload.data;
- cifs_dump_mem("SPNEGO reply blob:", msg->data, min(1024,
+ cifs_dump_mem("SPNEGO reply blob:", msg->data, min(1024U,
msg->secblob_len + msg->sesskey_len));
}
#endif /* CONFIG_CIFS_DEBUG2 */
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index b5903b89250..7d75272a6b3 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -32,7 +32,7 @@
*
*/
int
-cifs_strfromUCS_le(char *to, const __le16 * from,
+cifs_strfromUCS_le(char *to, const __le16 *from,
int len, const struct nls_table *codepage)
{
int i;
@@ -61,7 +61,7 @@ cifs_strfromUCS_le(char *to, const __le16 * from,
*
*/
int
-cifs_strtoUCS(__le16 * to, const char *from, int len,
+cifs_strtoUCS(__le16 *to, const char *from, int len,
const struct nls_table *codepage)
{
int charlen;
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 614c11fcdcb..14eb9a2395d 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -254,7 +254,8 @@ UniStrstr(const wchar_t *ucs1, const wchar_t *ucs2)
const wchar_t *anchor2 = ucs2;
while (*ucs1) {
- if (*ucs1 == *ucs2) { /* Partial match found */
+ if (*ucs1 == *ucs2) {
+ /* Partial match found */
ucs1++;
ucs2++;
} else {
@@ -279,7 +280,8 @@ UniToupper(register wchar_t uc)
{
register const struct UniCaseRange *rp;
- if (uc < sizeof (CifsUniUpperTable)) { /* Latin characters */
+ if (uc < sizeof(CifsUniUpperTable)) {
+ /* Latin characters */
return uc + CifsUniUpperTable[uc]; /* Use base tables */
} else {
rp = CifsUniUpperRange; /* Use range tables */
@@ -320,7 +322,8 @@ UniTolower(wchar_t uc)
{
register struct UniCaseRange *rp;
- if (uc < sizeof (UniLowerTable)) { /* Latin characters */
+ if (uc < sizeof(UniLowerTable)) {
+ /* Latin characters */
return uc + UniLowerTable[uc]; /* Use base tables */
} else {
rp = UniLowerRange; /* Use range tables */
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index a7035bd18e4..f93932c2177 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -46,8 +46,7 @@ static struct cifs_wksid wksidarr[NUM_WK_SIDS] = {
static const struct cifs_sid sid_everyone = {
1, 1, {0, 0, 0, 0, 0, 1}, {0} };
/* group users */
-static const struct cifs_sid sid_user =
- {1, 2 , {0, 0, 0, 0, 0, 5}, {} };
+static const struct cifs_sid sid_user = {1, 2, {0, 0, 0, 0, 0, 5}, {} };
int match_sid(struct cifs_sid *ctsid)
@@ -195,9 +194,9 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
/* For deny ACEs we change the mask so that subsequent allow access
control entries do not turn on the bits we are denying */
if (type == ACCESS_DENIED) {
- if (flags & GENERIC_ALL) {
+ if (flags & GENERIC_ALL)
*pbits_to_set &= ~S_IRWXUGO;
- }
+
if ((flags & GENERIC_WRITE) ||
((flags & FILE_WRITE_RIGHTS) == FILE_WRITE_RIGHTS))
*pbits_to_set &= ~S_IWUGO;
@@ -216,9 +215,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
if (flags & GENERIC_ALL) {
*pmode |= (S_IRWXUGO & (*pbits_to_set));
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("all perms"));
-#endif
+ cFYI(DBG2, ("all perms"));
return;
}
if ((flags & GENERIC_WRITE) ||
@@ -231,9 +228,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
*pmode |= (S_IXUGO & (*pbits_to_set));
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("access flags 0x%x mode now 0x%x", flags, *pmode));
-#endif
+ cFYI(DBG2, ("access flags 0x%x mode now 0x%x", flags, *pmode));
return;
}
@@ -262,9 +257,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
if (mode & S_IXUGO)
*pace_flags |= SET_FILE_EXEC_RIGHTS;
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("mode: 0x%x, access flags now 0x%x", mode, *pace_flags));
-#endif
+ cFYI(DBG2, ("mode: 0x%x, access flags now 0x%x", mode, *pace_flags));
return;
}
@@ -358,11 +351,9 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
return;
}
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("DACL revision %d size %d num aces %d",
+ cFYI(DBG2, ("DACL revision %d size %d num aces %d",
le16_to_cpu(pdacl->revision), le16_to_cpu(pdacl->size),
le32_to_cpu(pdacl->num_aces)));
-#endif
/* reset rwx permissions for user/group/other.
Also, if num_aces is 0 i.e. DACL has no ACEs,
@@ -381,10 +372,6 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
GFP_KERNEL);
-/* cifscred->cecount = pdacl->num_aces;
- cifscred->aces = kmalloc(num_aces *
- sizeof(struct cifs_ace *), GFP_KERNEL);*/
-
for (i = 0; i < num_aces; ++i) {
ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
#ifdef CONFIG_CIFS_DEBUG2
@@ -437,7 +424,7 @@ static int set_chmod_dacl(struct cifs_acl *pndacl, struct cifs_sid *pownersid,
&sid_everyone, nmode, S_IRWXO);
pndacl->size = cpu_to_le16(size + sizeof(struct cifs_acl));
- pndacl->num_aces = 3;
+ pndacl->num_aces = cpu_to_le32(3);
return (0);
}
@@ -495,13 +482,11 @@ static int parse_sec_desc(struct cifs_ntsd *pntsd, int acl_len,
le32_to_cpu(pntsd->gsidoffset));
dacloffset = le32_to_cpu(pntsd->dacloffset);
dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("revision %d type 0x%x ooffset 0x%x goffset 0x%x "
+ cFYI(DBG2, ("revision %d type 0x%x ooffset 0x%x goffset 0x%x "
"sacloffset 0x%x dacloffset 0x%x",
pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
le32_to_cpu(pntsd->gsidoffset),
le32_to_cpu(pntsd->sacloffset), dacloffset));
-#endif
/* cifs_dump_mem("owner_sid: ", owner_sid_ptr, 64); */
rc = parse_sid(owner_sid_ptr, end_of_acl);
if (rc)
@@ -636,9 +621,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
struct super_block *sb;
struct cifs_sb_info *cifs_sb;
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("set ACL for %s from mode 0x%x", path, inode->i_mode));
-#endif
+ cFYI(DBG2, ("set ACL for %s from mode 0x%x", path, inode->i_mode));
if (!inode)
return (rc);
@@ -669,9 +652,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
}
rc = CIFSSMBSetCIFSACL(xid, cifs_sb->tcon, fid, pnntsd, acllen);
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("SetCIFSACL rc = %d", rc));
-#endif
+ cFYI(DBG2, ("SetCIFSACL rc = %d", rc));
if (unlock_file == TRUE)
atomic_dec(&open_file->wrtPending);
else
@@ -689,9 +670,7 @@ void acl_to_uid_mode(struct inode *inode, const char *path)
u32 acllen = 0;
int rc = 0;
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("converting ACL to mode for %s", path));
-#endif
+ cFYI(DBG2, ("converting ACL to mode for %s", path));
pntsd = get_cifs_acl(&acllen, inode, path);
/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
@@ -712,9 +691,7 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("set ACL from mode for %s", path));
-#endif
+ cFYI(DBG2, ("set ACL from mode for %s", path));
/* Get the security descriptor */
pntsd = get_cifs_acl(&acllen, inode, path);
@@ -736,16 +713,12 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
rc = build_sec_desc(pntsd, pnntsd, acllen, inode, nmode);
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("build_sec_desc rc: %d", rc));
-#endif
+ cFYI(DBG2, ("build_sec_desc rc: %d", rc));
if (!rc) {
/* Set the security descriptor */
rc = set_cifs_acl(pnntsd, acllen, inode, path);
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("set_cifs_acl rc: %d", rc));
-#endif
+ cFYI(DBG2, ("set_cifs_acl rc: %d", rc));
}
kfree(pnntsd);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index fcc43422769..a04b17e5a9d 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -204,9 +204,8 @@ cifs_put_super(struct super_block *sb)
return;
}
rc = cifs_umount(sb, cifs_sb);
- if (rc) {
+ if (rc)
cERROR(1, ("cifs_umount failed with return code %d", rc));
- }
#ifdef CONFIG_CIFS_DFS_UPCALL
if (cifs_sb->mountdata) {
kfree(cifs_sb->mountdata);
@@ -461,7 +460,7 @@ int cifs_xstate_get(struct super_block *sb, struct fs_quota_stat *qstats)
static struct quotactl_ops cifs_quotactl_ops = {
.set_xquota = cifs_xquota_set,
- .get_xquota = cifs_xquota_set,
+ .get_xquota = cifs_xquota_get,
.set_xstate = cifs_xstate_set,
.get_xstate = cifs_xstate_get,
};
@@ -472,9 +471,7 @@ static void cifs_umount_begin(struct vfsmount *vfsmnt, int flags)
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *tcon;
-#ifdef CONFIG_CIFS_DFS_UPCALL
dfs_shrink_umount_helper(vfsmnt);
-#endif /* CONFIG CIFS_DFS_UPCALL */
if (!(flags & MNT_FORCE))
return;
@@ -992,9 +989,7 @@ static int __init
init_cifs(void)
{
int rc = 0;
-#ifdef CONFIG_PROC_FS
cifs_proc_init();
-#endif
/* INIT_LIST_HEAD(&GlobalServerList);*/ /* BB not implemented yet */
INIT_LIST_HEAD(&GlobalSMBSessionList);
INIT_LIST_HEAD(&GlobalTreeConnectionList);
@@ -1095,19 +1090,15 @@ init_cifs(void)
out_destroy_inodecache:
cifs_destroy_inodecache();
out_clean_proc:
-#ifdef CONFIG_PROC_FS
cifs_proc_clean();
-#endif
return rc;
}
static void __exit
exit_cifs(void)
{
- cFYI(0, ("exit_cifs"));
-#ifdef CONFIG_PROC_FS
+ cFYI(DBG2, ("exit_cifs"));
cifs_proc_clean();
-#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
unregister_key_type(&key_type_dns_resolver);
#endif
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 5d32d8ddc82..69a2e194254 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -454,7 +454,7 @@ struct dir_notify_req {
struct dfs_info3_param {
int flags; /* DFSREF_REFERRAL_SERVER, DFSREF_STORAGE_SERVER*/
- int PathConsumed;
+ int path_consumed;
int server_type;
int ref_flag;
char *path_name;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 2f09f565a3d..0af63e6b426 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -53,11 +53,11 @@ extern int SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *,
struct kvec *, int /* nvec to send */,
int * /* type of buf returned */ , const int flags);
-extern int SendReceiveBlockingLock(const unsigned int /* xid */ ,
- struct cifsTconInfo *,
- struct smb_hdr * /* input */ ,
- struct smb_hdr * /* out */ ,
- int * /* bytes returned */);
+extern int SendReceiveBlockingLock(const unsigned int xid,
+ struct cifsTconInfo *ptcon,
+				struct smb_hdr *in_buf,
+ struct smb_hdr *out_buf,
+ int *bytes_returned);
extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length);
extern int is_valid_oplock_break(struct smb_hdr *smb, struct TCP_Server_Info *);
extern int is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
@@ -84,7 +84,7 @@ extern __u16 GetNextMid(struct TCP_Server_Info *server);
extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16,
struct cifsTconInfo *);
extern void DeleteOplockQEntry(struct oplock_q_entry *);
-extern struct timespec cifs_NTtimeToUnix(u64 /* utc nanoseconds since 1601 */ );
+extern struct timespec cifs_NTtimeToUnix(u64 utc_nanoseconds_since_1601);
extern u64 cifs_UnixTimeToNT(struct timespec);
extern __le64 cnvrtDosCifsTm(__u16 date, __u16 time);
extern struct timespec cnvrtDosUnixTm(__u16 date, __u16 time);
@@ -104,7 +104,11 @@ extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
#ifdef CONFIG_CIFS_DFS_UPCALL
extern void dfs_shrink_umount_helper(struct vfsmount *vfsmnt);
-#endif
+#else
+static inline void dfs_shrink_umount_helper(struct vfsmount *vfsmnt)
+{
+}
+#endif /* DFS_UPCALL */
void cifs_proc_init(void);
void cifs_proc_clean(void);
@@ -175,11 +179,11 @@ extern int CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon,
struct kstatfs *FSData);
extern int CIFSSMBSetTimes(const int xid, struct cifsTconInfo *tcon,
- const char *fileName, const FILE_BASIC_INFO * data,
+ const char *fileName, const FILE_BASIC_INFO *data,
const struct nls_table *nls_codepage,
int remap_special_chars);
extern int CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon,
- const FILE_BASIC_INFO * data, __u16 fid);
+ const FILE_BASIC_INFO *data, __u16 fid);
#if 0
extern int CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon,
char *fileName, __u16 dos_attributes,
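
The #else branch added for dfs_shrink_umount_helper is the standard empty-inline-stub pattern: it is what lets cifs_umount_begin in cifsfs.c drop its #ifdef CONFIG_CIFS_DFS_UPCALL guard. A generic sketch of the pattern under an assumed FEATURE_X compile-time switch:

	#include <stdio.h>

	/* Toggle at build time, e.g. -DFEATURE_X, standing in for a Kconfig
	 * option such as CONFIG_CIFS_DFS_UPCALL. */
	#ifdef FEATURE_X
	static void feature_x_helper(const char *who)
	{
		printf("feature X helping %s\n", who);
	}
	#else
	/* Empty inline stub: callers stay #ifdef-free and the compiler
	 * discards the call entirely when the feature is compiled out. */
	static inline void feature_x_helper(const char *who)
	{
		(void)who;
	}
	#endif

	int main(void)
	{
		/* No conditional compilation needed at the call site. */
		feature_x_helper("umount_begin");
		return 0;
	}
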
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 9409524e4bf..30bbe448e26 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifssmb.c
*
- * Copyright (C) International Business Machines Corp., 2002,2007
+ * Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
* Contains the routines for constructing the SMB PDUs themselves
@@ -102,10 +102,12 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
to this tcon */
}
-/* If the return code is zero, this function must fill in request_buf pointer */
+/* Allocate and return pointer to an SMB request buffer, and set basic
+ SMB information in the SMB header. If the return code is zero, this
+ function must have filled in the request_buf pointer */
static int
small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
- void **request_buf /* returned */)
+ void **request_buf)
{
int rc = 0;
@@ -363,7 +365,7 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
*response_buf = *request_buf;
header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,
- wct /*wct */ );
+ wct);
if (tcon != NULL)
cifs_stats_inc(&tcon->num_smbs_sent);
@@ -523,7 +525,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
if (remain >= (MIN_TZ_ADJ / 2))
result += MIN_TZ_ADJ;
if (val < 0)
- result = - result;
+ result = -result;
server->timeAdj = result;
} else {
server->timeAdj = (int)tmp;
@@ -600,7 +602,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize),
(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
server->maxRw = le32_to_cpu(pSMBr->MaxRawSize);
- cFYI(0, ("Max buf = %d", ses->server->maxBuf));
+ cFYI(DBG2, ("Max buf = %d", ses->server->maxBuf));
GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
server->capabilities = le32_to_cpu(pSMBr->Capabilities);
server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
@@ -868,9 +870,8 @@ PsxDelete:
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
+ if (rc)
cFYI(1, ("Posix delete returned %d", rc));
- }
cifs_buf_release(pSMB);
cifs_stats_inc(&tcon->num_deletes);
@@ -916,9 +917,8 @@ DelFileRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_deletes);
- if (rc) {
+ if (rc)
cFYI(1, ("Error in RMFile = %d", rc));
- }
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -961,9 +961,8 @@ RmDirRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_rmdirs);
- if (rc) {
+ if (rc)
cFYI(1, ("Error in RMDir = %d", rc));
- }
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -1005,9 +1004,8 @@ MkDirRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_mkdirs);
- if (rc) {
+ if (rc)
cFYI(1, ("Error in Mkdir = %d", rc));
- }
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -1017,7 +1015,7 @@ MkDirRetry:
int
CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags,
- __u64 mode, __u16 * netfid, FILE_UNIX_BASIC_INFO *pRetData,
+ __u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData,
__u32 *pOplock, const char *name,
const struct nls_table *nls_codepage, int remap)
{
@@ -1027,8 +1025,8 @@ CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags,
int rc = 0;
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count, count;
- OPEN_PSX_REQ * pdata;
- OPEN_PSX_RSP * psx_rsp;
+ OPEN_PSX_REQ *pdata;
+ OPEN_PSX_RSP *psx_rsp;
cFYI(1, ("In POSIX Create"));
PsxCreat:
@@ -1110,9 +1108,7 @@ PsxCreat:
/* check to make sure response data is there */
if (psx_rsp->ReturnedLevel != cpu_to_le16(SMB_QUERY_FILE_UNIX_BASIC)) {
pRetData->Type = cpu_to_le32(-1); /* unknown */
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("unknown type"));
-#endif
+ cFYI(DBG2, ("unknown type"));
} else {
if (pSMBr->ByteCount < sizeof(OPEN_PSX_RSP)
+ sizeof(FILE_UNIX_BASIC_INFO)) {
@@ -1169,8 +1165,8 @@ static __u16 convert_disposition(int disposition)
int
SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon,
const char *fileName, const int openDisposition,
- const int access_flags, const int create_options, __u16 * netfid,
- int *pOplock, FILE_ALL_INFO * pfile_info,
+ const int access_flags, const int create_options, __u16 *netfid,
+ int *pOplock, FILE_ALL_INFO *pfile_info,
const struct nls_table *nls_codepage, int remap)
{
int rc = -EACCES;
@@ -1221,8 +1217,8 @@ OldOpenRetry:
if (create_options & CREATE_OPTION_SPECIAL)
pSMB->FileAttributes = cpu_to_le16(ATTR_SYSTEM);
- else
- pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/); /* BB FIXME */
+ else /* BB FIXME BB */
+ pSMB->FileAttributes = cpu_to_le16(0/*ATTR_NORMAL*/);
/* if ((omode & S_IWUGO) == 0)
pSMB->FileAttributes |= cpu_to_le32(ATTR_READONLY);*/
@@ -1284,8 +1280,8 @@ OldOpenRetry:
int
CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon,
const char *fileName, const int openDisposition,
- const int access_flags, const int create_options, __u16 * netfid,
- int *pOplock, FILE_ALL_INFO * pfile_info,
+ const int access_flags, const int create_options, __u16 *netfid,
+ int *pOplock, FILE_ALL_INFO *pfile_info,
const struct nls_table *nls_codepage, int remap)
{
int rc = -EACCES;
@@ -1556,9 +1552,9 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon,
} /* else setting file size with write of zero bytes */
if (wct == 14)
byte_count = bytes_sent + 1; /* pad */
- else /* wct == 12 */ {
+ else /* wct == 12 */
byte_count = bytes_sent + 5; /* bigger pad, smaller smb hdr */
- }
+
pSMB->DataLengthLow = cpu_to_le16(bytes_sent & 0xFFFF);
pSMB->DataLengthHigh = cpu_to_le16(bytes_sent >> 16);
pSMB->hdr.smb_buf_length += byte_count;
@@ -1663,7 +1659,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
rc = -EIO;
*nbytes = 0;
} else {
- WRITE_RSP * pSMBr = (WRITE_RSP *)iov[0].iov_base;
+ WRITE_RSP *pSMBr = (WRITE_RSP *)iov[0].iov_base;
*nbytes = le16_to_cpu(pSMBr->CountHigh);
*nbytes = (*nbytes) << 16;
*nbytes += le16_to_cpu(pSMBr->Count);
@@ -1744,9 +1740,8 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
/* SMB buffer freed by function above */
}
cifs_stats_inc(&tcon->num_locks);
- if (rc) {
+ if (rc)
cFYI(1, ("Send error in Lock = %d", rc));
- }
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
@@ -1791,7 +1786,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
count = sizeof(struct cifs_posix_lock);
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
+ pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
if (get_flag)
@@ -1972,9 +1967,8 @@ renameRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_renames);
- if (rc) {
+ if (rc)
cFYI(1, ("Send error in rename = %d", rc));
- }
cifs_buf_release(pSMB);
@@ -2016,7 +2010,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
rename_info = (struct set_file_rename *) data_offset;
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
+ pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB from sess */
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
@@ -2052,9 +2046,8 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon,
rc = SendReceive(xid, pTcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&pTcon->num_t2renames);
- if (rc) {
+ if (rc)
cFYI(1, ("Send error in Rename (by file handle) = %d", rc));
- }
cifs_buf_release(pSMB);
@@ -2211,9 +2204,8 @@ createSymLinkRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_symlinks);
- if (rc) {
+ if (rc)
cFYI(1, ("Send error in SetPathInfo create symlink = %d", rc));
- }
if (pSMB)
cifs_buf_release(pSMB);
@@ -2299,9 +2291,8 @@ createHardLinkRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_hardlinks);
- if (rc) {
+ if (rc)
cFYI(1, ("Send error in SetPathInfo (hard link) = %d", rc));
- }
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
@@ -2370,9 +2361,9 @@ winCreateHardLinkRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_hardlinks);
- if (rc) {
+ if (rc)
cFYI(1, ("Send error in hard link (NT rename) = %d", rc));
- }
+
cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto winCreateHardLinkRetry;
@@ -2968,9 +2959,8 @@ setAclRetry:
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
+ if (rc)
cFYI(1, ("Set POSIX ACL returned %d", rc));
- }
setACLerrorExit:
cifs_buf_release(pSMB);
@@ -2982,7 +2972,7 @@ setACLerrorExit:
/* BB fix tabs in this function FIXME BB */
int
CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon,
- const int netfid, __u64 * pExtAttrBits, __u64 *pMask)
+ const int netfid, __u64 *pExtAttrBits, __u64 *pMask)
{
int rc = 0;
struct smb_t2_qfi_req *pSMB = NULL;
@@ -3000,7 +2990,7 @@ GetExtAttrRetry:
if (rc)
return rc;
- params = 2 /* level */ +2 /* fid */;
+ params = 2 /* level */ + 2 /* fid */;
pSMB->t2.TotalDataCount = 0;
pSMB->t2.MaxParameterCount = cpu_to_le16(4);
/* BB find exact max data count below from sess structure BB */
@@ -3071,7 +3061,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
{
int rc = 0;
int buf_type = 0;
- QUERY_SEC_DESC_REQ * pSMB;
+ QUERY_SEC_DESC_REQ *pSMB;
struct kvec iov[1];
cFYI(1, ("GetCifsACL"));
@@ -3101,7 +3091,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid,
if (rc) {
cFYI(1, ("Send error in QuerySecDesc = %d", rc));
} else { /* decode response */
- __le32 * parm;
+ __le32 *parm;
__u32 parm_len;
__u32 acl_len;
struct smb_com_ntransact_rsp *pSMBr;
@@ -3230,8 +3220,8 @@ int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
FILE_ALL_INFO *pFinfo,
const struct nls_table *nls_codepage, int remap)
{
- QUERY_INFORMATION_REQ * pSMB;
- QUERY_INFORMATION_RSP * pSMBr;
+ QUERY_INFORMATION_REQ *pSMB;
+ QUERY_INFORMATION_RSP *pSMBr;
int rc = 0;
int bytes_returned;
int name_len;
@@ -3263,9 +3253,11 @@ QInfRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, ("Send error in QueryInfo = %d", rc));
- } else if (pFinfo) { /* decode response */
+ } else if (pFinfo) {
struct timespec ts;
__u32 time = le32_to_cpu(pSMBr->last_write_time);
+
+ /* decode response */
/* BB FIXME - add time zone adjustment BB */
memset(pFinfo, 0, sizeof(FILE_ALL_INFO));
ts.tv_nsec = 0;
@@ -3296,7 +3288,7 @@ QInfRetry:
int
CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName,
- FILE_ALL_INFO * pFindData,
+ FILE_ALL_INFO *pFindData,
int legacy /* old style infolevel */,
const struct nls_table *nls_codepage, int remap)
{
@@ -3371,10 +3363,12 @@ QPathInfoRetry:
else if (pFindData) {
int size;
__u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- if (legacy) /* we do not read the last field, EAsize,
- fortunately since it varies by subdialect
- and on Set vs. Get, is two bytes or 4
- bytes depending but we don't care here */
+
+ /* On legacy responses we do not read the last field,
+ EAsize, fortunately since it varies by subdialect and
+ also differs on Set vs. Get, i.e. is two bytes or 4
+ bytes depending on the call, but we don't care here */
+ if (legacy)
size = sizeof(FILE_INFO_STANDARD);
else
size = sizeof(FILE_ALL_INFO);
@@ -3476,85 +3470,6 @@ UnixQPathInfoRetry:
return rc;
}
-#if 0 /* function unused at present */
-int CIFSFindSingle(const int xid, struct cifsTconInfo *tcon,
- const char *searchName, FILE_ALL_INFO * findData,
- const struct nls_table *nls_codepage)
-{
-/* level 257 SMB_ */
- TRANSACTION2_FFIRST_REQ *pSMB = NULL;
- TRANSACTION2_FFIRST_RSP *pSMBr = NULL;
- int rc = 0;
- int bytes_returned;
- int name_len;
- __u16 params, byte_count;
-
- cFYI(1, ("In FindUnique"));
-findUniqueRetry:
- rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
- (void **) &pSMBr);
- if (rc)
- return rc;
-
- if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len =
- cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
- PATH_MAX, nls_codepage);
- name_len++; /* trailing null */
- name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
- }
-
- params = 12 + name_len /* includes null */ ;
- pSMB->TotalDataCount = 0; /* no EAs */
- pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(4000); /* BB find exact max SMB PDU from sess structure BB */
- pSMB->MaxSetupCount = 0;
- pSMB->Reserved = 0;
- pSMB->Flags = 0;
- pSMB->Timeout = 0;
- pSMB->Reserved2 = 0;
- pSMB->ParameterOffset = cpu_to_le16(
- offsetof(struct smb_com_transaction2_ffirst_req, InformationLevel)-4);
- pSMB->DataCount = 0;
- pSMB->DataOffset = 0;
- pSMB->SetupCount = 1; /* one byte, no need to le convert */
- pSMB->Reserved3 = 0;
- pSMB->SubCommand = cpu_to_le16(TRANS2_FIND_FIRST);
- byte_count = params + 1 /* pad */ ;
- pSMB->TotalParameterCount = cpu_to_le16(params);
- pSMB->ParameterCount = pSMB->TotalParameterCount;
- pSMB->SearchAttributes =
- cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
- ATTR_DIRECTORY);
- pSMB->SearchCount = cpu_to_le16(16); /* BB increase */
- pSMB->SearchFlags = cpu_to_le16(1);
- pSMB->InformationLevel = cpu_to_le16(SMB_FIND_FILE_DIRECTORY_INFO);
- pSMB->SearchStorageType = 0; /* BB what should we set this to? BB */
- pSMB->hdr.smb_buf_length += byte_count;
- pSMB->ByteCount = cpu_to_le16(byte_count);
-
- rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned, 0);
-
- if (rc) {
- cFYI(1, ("Send error in FindFileDirInfo = %d", rc));
- } else { /* decode response */
- cifs_stats_inc(&tcon->num_ffirst);
- /* BB fill in */
- }
-
- cifs_buf_release(pSMB);
- if (rc == -EAGAIN)
- goto findUniqueRetry;
-
- return rc;
-}
-#endif /* end unused (temporarily) function */
-
/* xid, tcon, searchName and codepage are input parms, rest are returned */
int
CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
@@ -3566,7 +3481,7 @@ CIFSFindFirst(const int xid, struct cifsTconInfo *tcon,
/* level 257 SMB_ */
TRANSACTION2_FFIRST_REQ *pSMB = NULL;
TRANSACTION2_FFIRST_RSP *pSMBr = NULL;
- T2_FFIRST_RSP_PARMS * parms;
+ T2_FFIRST_RSP_PARMS *parms;
int rc = 0;
int bytes_returned = 0;
int name_len;
@@ -3697,7 +3612,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
{
TRANSACTION2_FNEXT_REQ *pSMB = NULL;
TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
- T2_FNEXT_RSP_PARMS * parms;
+ T2_FNEXT_RSP_PARMS *parms;
char *response_data;
int rc = 0;
int bytes_returned, name_len;
@@ -3836,9 +3751,9 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
pSMB->FileID = searchHandle;
pSMB->ByteCount = 0;
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
- if (rc) {
+ if (rc)
cERROR(1, ("Send error in FindClose = %d", rc));
- }
+
cifs_stats_inc(&tcon->num_fclose);
/* Since session is dead, search handle closed on server already */
@@ -3851,7 +3766,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon,
int
CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName,
- __u64 * inode_number,
+ __u64 *inode_number,
const struct nls_table *nls_codepage, int remap)
{
int rc = 0;
@@ -4560,9 +4475,8 @@ SETFSUnixRetry:
cERROR(1, ("Send error in SETFSUnixInfo = %d", rc));
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
- if (rc) {
+ if (rc)
rc = -EIO; /* bad smb */
- }
}
cifs_buf_release(pSMB);
@@ -4744,9 +4658,8 @@ SetEOFRetry:
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
+ if (rc)
cFYI(1, ("SetPathInfo (file size) returned %d", rc));
- }
cifs_buf_release(pSMB);
@@ -4897,9 +4810,8 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon,
pSMB->ByteCount = cpu_to_le16(byte_count);
memcpy(data_offset, data, sizeof(FILE_BASIC_INFO));
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0);
- if (rc) {
+ if (rc)
cFYI(1, ("Send error in Set Time (SetFileInfo) = %d", rc));
- }
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
@@ -4975,9 +4887,8 @@ SetTimesRetry:
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
+ if (rc)
cFYI(1, ("SetPathInfo (times) returned %d", rc));
- }
cifs_buf_release(pSMB);
@@ -5027,9 +4938,8 @@ SetAttrLgcyRetry:
pSMB->ByteCount = cpu_to_le16(name_len + 1);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
+ if (rc)
cFYI(1, ("Error in LegacySetAttr = %d", rc));
- }
cifs_buf_release(pSMB);
@@ -5138,9 +5048,8 @@ setPermsRetry:
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
+ if (rc)
cFYI(1, ("SetPathInfo (perms) returned %d", rc));
- }
if (pSMB)
cifs_buf_release(pSMB);
@@ -5615,9 +5524,8 @@ SetEARetry:
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc) {
+ if (rc)
cFYI(1, ("SetPathInfo (EA) returned %d", rc));
- }
cifs_buf_release(pSMB);
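
Most of the cifssmb.c hunks are brace and whitespace cleanups, but the negotiate-path context above includes the rounding of the server's time-zone offset to the nearest MIN_TZ_ADJ step. A stand-alone sketch of that rounding, assuming the 15-minute granularity commonly used for MIN_TZ_ADJ:

	#include <stdio.h>

	#define MIN_TZ_ADJ (15 * 60)	/* assumed: 15 minutes, in seconds */

	/* Round a signed UTC offset to the nearest MIN_TZ_ADJ multiple,
	 * mirroring the remainder test seen in CIFSSMBNegotiate. */
	static int round_tz_adj(int val)
	{
		int abs_val = val < 0 ? -val : val;
		int result = (abs_val / MIN_TZ_ADJ) * MIN_TZ_ADJ;
		int remain = abs_val % MIN_TZ_ADJ;

		if (remain >= MIN_TZ_ADJ / 2)	/* round away from zero */
			result += MIN_TZ_ADJ;
		return val < 0 ? -result : result;
	}

	int main(void)
	{
		printf("%d\n", round_tz_adj(3599));   /* -> 3600 (one hour) */
		printf("%d\n", round_tz_adj(-3000));  /* -> -2700 */
		return 0;
	}
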
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 65d0ba72e78..8dbfa97cd18 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1722,8 +1722,15 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
originally at mount time */
if ((saved_cap & CIFS_UNIX_POSIX_ACL_CAP) == 0)
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
- if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0)
+ if ((saved_cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
+ if (cap & CIFS_UNIX_POSIX_PATHNAMES_CAP)
+ cERROR(1, ("POSIXPATH support change"));
cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
+ } else if ((cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) == 0) {
+ cERROR(1, ("possible reconnect error"));
+ cERROR(1,
+ ("server disabled POSIX path support"));
+ }
}
cap &= CIFS_UNIX_CAP_MASK;
@@ -1753,9 +1760,8 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) {
if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
CIFS_SB(sb)->rsize = 127 * 1024;
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("larger reads not supported by srv"));
-#endif
+ cFYI(DBG2,
+ ("larger reads not supported by srv"));
}
}
@@ -1792,6 +1798,26 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon,
}
}
+static void
+convert_delimiter(char *path, char delim)
+{
+ int i;
+ char old_delim;
+
+ if (path == NULL)
+ return;
+
+ if (delim == '/')
+ old_delim = '\\';
+ else
+ old_delim = '/';
+
+ for (i = 0; path[i] != '\0'; i++) {
+ if (path[i] == old_delim)
+ path[i] = delim;
+ }
+}
+
int
cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
char *mount_data, const char *devname)
@@ -2057,7 +2083,11 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
cifs_sb->prepath = volume_info.prepath;
if (cifs_sb->prepath) {
cifs_sb->prepathlen = strlen(cifs_sb->prepath);
- cifs_sb->prepath[0] = CIFS_DIR_SEP(cifs_sb);
+ /* we can not convert the / path separators to \
+ in the prefixpath yet because we do not know
+ (until reset_cifs_unix_caps is called later)
+ whether POSIX PATH CAP is available. We normalize
+ the / to \ after reset_cifs_unix_caps is called */
volume_info.prepath = NULL;
} else
cifs_sb->prepathlen = 0;
@@ -2225,11 +2255,15 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
else
tcon->unix_ext = 0; /* server does not support them */
+ /* convert forward to back slashes in prepath here if needed */
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0)
+ convert_delimiter(cifs_sb->prepath,
+ CIFS_DIR_SEP(cifs_sb));
+
if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
cifs_sb->rsize = 1024 * 127;
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("no very large read support, rsize now 127K"));
-#endif
+ cFYI(DBG2,
+ ("no very large read support, rsize now 127K"));
}
if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X))
cifs_sb->wsize = min(cifs_sb->wsize,
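
convert_delimiter, added above, rewrites the prepath's separators in place once reset_cifs_unix_caps has established whether POSIX paths are usable. A user-space copy of the same logic with a usage example (the test harness is added here, not part of the patch):

	#include <stdio.h>

	/* Same logic as the convert_delimiter added to connect.c. */
	static void convert_delimiter(char *path, char delim)
	{
		int i;
		char old_delim;

		if (path == NULL)
			return;

		old_delim = (delim == '/') ? '\\' : '/';

		for (i = 0; path[i] != '\0'; i++) {
			if (path[i] == old_delim)
				path[i] = delim;
		}
	}

	int main(void)
	{
		char prepath[] = "/dir/subdir/file";

		convert_delimiter(prepath, '\\');  /* non-POSIX server: \dir\subdir\file */
		printf("%s\n", prepath);
		convert_delimiter(prepath, '/');   /* and back again */
		printf("%s\n", prepath);
		return 0;
	}
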
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 699ec119840..4e83b47c4b3 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -3,7 +3,7 @@
*
* vfs operations that deal with dentries
*
- * Copyright (C) International Business Machines Corp., 2002,2007
+ * Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -111,16 +111,6 @@ cifs_bp_rename_retry:
return full_path;
}
-/* char * build_wildcard_path_from_dentry(struct dentry *direntry)
-{
- if(full_path == NULL)
- return full_path;
-
- full_path[namelen] = '\\';
- full_path[namelen+1] = '*';
- full_path[namelen+2] = 0;
-BB remove above eight lines BB */
-
/* Inode operations in similar order to how they appear in Linux file fs.h */
int
@@ -171,9 +161,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
disposition = FILE_OVERWRITE_IF;
else if ((oflags & O_CREAT) == O_CREAT)
disposition = FILE_OPEN_IF;
- else {
+ else
cFYI(1, ("Create flag not set in create function"));
- }
}
/* BB add processing to set equivalent of mode - e.g. via CreateX with
@@ -367,7 +356,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
int oplock = 0;
u16 fileHandle;
- FILE_ALL_INFO * buf;
+ FILE_ALL_INFO *buf;
cFYI(1, ("sfu compat create special file"));
@@ -534,9 +523,8 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
int isValid = 1;
if (direntry->d_inode) {
- if (cifs_revalidate(direntry)) {
+ if (cifs_revalidate(direntry))
return 0;
- }
} else {
cFYI(1, ("neg dentry 0x%p name = %s",
direntry, direntry->d_name.name));
diff --git a/fs/cifs/dns_resolve.h b/fs/cifs/dns_resolve.h
index 073fdc3db41..966e9288930 100644
--- a/fs/cifs/dns_resolve.h
+++ b/fs/cifs/dns_resolve.h
@@ -1,7 +1,7 @@
/*
* fs/cifs/dns_resolve.h -- DNS Resolver upcall management for CIFS DFS
* Handles host name to IP address resolution
- *
+ *
* Copyright (c) International Business Machines Corp., 2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c
index 995474c9088..7d1d5aa4c43 100644
--- a/fs/cifs/fcntl.c
+++ b/fs/cifs/fcntl.c
@@ -35,9 +35,8 @@ static __u32 convert_to_cifs_notify_flags(unsigned long fcntl_notify_flags)
/* No way on Linux VFS to ask to monitor xattr
changes (and no stream support either) */
- if (fcntl_notify_flags & DN_ACCESS) {
+ if (fcntl_notify_flags & DN_ACCESS)
cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_ACCESS;
- }
if (fcntl_notify_flags & DN_MODIFY) {
/* What does this mean on directories? */
cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE |
@@ -47,9 +46,8 @@ static __u32 convert_to_cifs_notify_flags(unsigned long fcntl_notify_flags)
cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_CREATION |
FILE_NOTIFY_CHANGE_LAST_WRITE;
}
- if (fcntl_notify_flags & DN_DELETE) {
+ if (fcntl_notify_flags & DN_DELETE)
cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_LAST_WRITE;
- }
if (fcntl_notify_flags & DN_RENAME) {
/* BB review this - checking various server behaviors */
cifs_ntfy_flags |= FILE_NOTIFY_CHANGE_DIR_NAME |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 5f7c374ae89..fa849c91d32 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -353,9 +353,9 @@ static int cifs_reopen_file(struct file *file, int can_flush)
int disposition = FILE_OPEN;
__u16 netfid;
- if (file->private_data) {
+ if (file->private_data)
pCifsFile = (struct cifsFileInfo *)file->private_data;
- } else
+ else
return -EBADF;
xid = GetXid();
@@ -499,9 +499,8 @@ int cifs_close(struct inode *inode, struct file *file)
the struct would be in each open file,
but this should give enough time to
clear the socket */
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("close delay, write pending"));
-#endif /* DEBUG2 */
+ cFYI(DBG2,
+ ("close delay, write pending"));
msleep(timeout);
timeout *= 4;
}
@@ -1423,9 +1422,8 @@ static int cifs_writepage(struct page *page, struct writeback_control *wbc)
xid = GetXid();
/* BB add check for wbc flags */
page_cache_get(page);
- if (!PageUptodate(page)) {
+ if (!PageUptodate(page))
cFYI(1, ("ppw - page not up to date"));
- }
/*
* Set the "writeback" flag, and clear "dirty" in the radix tree.
@@ -1460,9 +1458,9 @@ static int cifs_commit_write(struct file *file, struct page *page,
cFYI(1, ("commit write for page %p up to position %lld for %d",
page, position, to));
spin_lock(&inode->i_lock);
- if (position > inode->i_size) {
+ if (position > inode->i_size)
i_size_write(inode, position);
- }
+
spin_unlock(&inode->i_lock);
if (!PageUptodate(page)) {
position = ((loff_t)page->index << PAGE_CACHE_SHIFT) + offset;
@@ -1596,9 +1594,9 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
}
open_file = (struct cifsFileInfo *)file->private_data;
- if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
+ if ((file->f_flags & O_ACCMODE) == O_WRONLY)
cFYI(1, ("attempting read on write only file instance"));
- }
+
for (total_read = 0, current_offset = read_data;
read_size > total_read;
total_read += bytes_read, current_offset += bytes_read) {
@@ -1625,9 +1623,8 @@ ssize_t cifs_user_read(struct file *file, char __user *read_data,
smb_read_data +
4 /* RFC1001 length field */ +
le16_to_cpu(pSMBr->DataOffset),
- bytes_read)) {
+ bytes_read))
rc = -EFAULT;
- }
if (buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(smb_read_data);
@@ -1814,9 +1811,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
pTcon = cifs_sb->tcon;
pagevec_init(&lru_pvec, 0);
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("rpages: num pages %d", num_pages));
-#endif
+ cFYI(DBG2, ("rpages: num pages %d", num_pages));
for (i = 0; i < num_pages; ) {
unsigned contig_pages;
struct page *tmp_page;
@@ -1849,10 +1844,8 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
/* Read size needs to be in multiples of one page */
read_size = min_t(const unsigned int, read_size,
cifs_sb->rsize & PAGE_CACHE_MASK);
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("rpages: read size 0x%x contiguous pages %d",
+ cFYI(DBG2, ("rpages: read size 0x%x contiguous pages %d",
read_size, contig_pages));
-#endif
rc = -EAGAIN;
while (rc == -EAGAIN) {
if ((open_file->invalidHandle) &&
@@ -2026,7 +2019,7 @@ int is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
struct cifs_sb_info *cifs_sb;
cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
- if ( cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO ) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
/* since no page cache to corrupt on directio
we can change size safely */
return 1;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index b1a4a65eaa0..24eb4d39215 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -29,6 +29,130 @@
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
+
+static void cifs_set_ops(struct inode *inode)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ inode->i_op = &cifs_file_inode_ops;
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ inode->i_fop = &cifs_file_direct_nobrl_ops;
+ else
+ inode->i_fop = &cifs_file_direct_ops;
+ } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ inode->i_fop = &cifs_file_nobrl_ops;
+ else { /* not direct, send byte range locks */
+ inode->i_fop = &cifs_file_ops;
+ }
+
+
+ /* check if server can support readpages */
+ if (cifs_sb->tcon->ses->server->maxBuf <
+ PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
+ inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
+ else
+ inode->i_data.a_ops = &cifs_addr_ops;
+ break;
+ case S_IFDIR:
+ inode->i_op = &cifs_dir_inode_ops;
+ inode->i_fop = &cifs_dir_ops;
+ break;
+ case S_IFLNK:
+ inode->i_op = &cifs_symlink_inode_ops;
+ break;
+ default:
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ break;
+ }
+}
+
+static void cifs_unix_info_to_inode(struct inode *inode,
+ FILE_UNIX_BASIC_INFO *info, int force_uid_gid)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifsInodeInfo *cifsInfo = CIFS_I(inode);
+ __u64 num_of_bytes = le64_to_cpu(info->NumOfBytes);
+ __u64 end_of_file = le64_to_cpu(info->EndOfFile);
+
+ inode->i_atime = cifs_NTtimeToUnix(le64_to_cpu(info->LastAccessTime));
+ inode->i_mtime =
+ cifs_NTtimeToUnix(le64_to_cpu(info->LastModificationTime));
+ inode->i_ctime = cifs_NTtimeToUnix(le64_to_cpu(info->LastStatusChange));
+ inode->i_mode = le64_to_cpu(info->Permissions);
+
+ /*
+ * Since we set the inode type below we need to mask off
+ * to avoid strange results if bits set above.
+ */
+ inode->i_mode &= ~S_IFMT;
+ switch (le32_to_cpu(info->Type)) {
+ case UNIX_FILE:
+ inode->i_mode |= S_IFREG;
+ break;
+ case UNIX_SYMLINK:
+ inode->i_mode |= S_IFLNK;
+ break;
+ case UNIX_DIR:
+ inode->i_mode |= S_IFDIR;
+ break;
+ case UNIX_CHARDEV:
+ inode->i_mode |= S_IFCHR;
+ inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor),
+ le64_to_cpu(info->DevMinor) & MINORMASK);
+ break;
+ case UNIX_BLOCKDEV:
+ inode->i_mode |= S_IFBLK;
+ inode->i_rdev = MKDEV(le64_to_cpu(info->DevMajor),
+ le64_to_cpu(info->DevMinor) & MINORMASK);
+ break;
+ case UNIX_FIFO:
+ inode->i_mode |= S_IFIFO;
+ break;
+ case UNIX_SOCKET:
+ inode->i_mode |= S_IFSOCK;
+ break;
+ default:
+ /* safest to call it a file if we do not know */
+ inode->i_mode |= S_IFREG;
+ cFYI(1, ("unknown type %d", le32_to_cpu(info->Type)));
+ break;
+ }
+
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) &&
+ !force_uid_gid)
+ inode->i_uid = cifs_sb->mnt_uid;
+ else
+ inode->i_uid = le64_to_cpu(info->Uid);
+
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) &&
+ !force_uid_gid)
+ inode->i_gid = cifs_sb->mnt_gid;
+ else
+ inode->i_gid = le64_to_cpu(info->Gid);
+
+ inode->i_nlink = le64_to_cpu(info->Nlinks);
+
+ spin_lock(&inode->i_lock);
+ if (is_size_safe_to_change(cifsInfo, end_of_file)) {
+ /*
+ * We can not safely change the file size here if the client
+ * is writing to it due to potential races.
+ */
+ i_size_write(inode, end_of_file);
+
+ /*
+ * i_blocks is not related to (i_size / i_blksize),
+ * but instead 512 byte (2**9) size is required for
+ * calculating num blocks.
+ */
+ inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
+ }
+ spin_unlock(&inode->i_lock);
+}
+
int cifs_get_inode_info_unix(struct inode **pinode,
const unsigned char *search_path, struct super_block *sb, int xid)
{
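
The new cifs_unix_info_to_inode helper clears S_IFMT before OR-ing in the decoded type, and computes i_blocks in fixed 512-byte units. Both calculations in isolation (user-space sketch; S_IFMT and friends come from <sys/stat.h>):

	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		mode_t mode = 0755 | S_IFDIR;		/* stale type bit to discard */
		unsigned long long num_of_bytes = 5000;	/* allocation size from server */

		/* Mask off the old format bits before setting the real type,
		 * otherwise S_IFDIR | S_IFREG would yield a nonsense mode. */
		mode &= ~S_IFMT;
		mode |= S_IFREG;

		/* i_blocks counts 512-byte (2**9) units regardless of the fs
		 * block size; adding 511 rounds up. 5000 bytes -> 10 blocks. */
		unsigned long long blocks = (512 - 1 + num_of_bytes) >> 9;

		printf("mode %o regular=%d blocks=%llu\n",
		       (unsigned)mode, S_ISREG(mode), blocks);
		return 0;
	}
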
@@ -74,7 +198,6 @@ int cifs_get_inode_info_unix(struct inode **pinode,
}
} else {
struct cifsInodeInfo *cifsInfo;
- __u32 type = le32_to_cpu(findData.Type);
__u64 num_of_bytes = le64_to_cpu(findData.NumOfBytes);
__u64 end_of_file = le64_to_cpu(findData.EndOfFile);
@@ -105,112 +228,16 @@ int cifs_get_inode_info_unix(struct inode **pinode,
/* this is ok to set on every inode revalidate */
atomic_set(&cifsInfo->inUse, 1);
- inode->i_atime =
- cifs_NTtimeToUnix(le64_to_cpu(findData.LastAccessTime));
- inode->i_mtime =
- cifs_NTtimeToUnix(le64_to_cpu
- (findData.LastModificationTime));
- inode->i_ctime =
- cifs_NTtimeToUnix(le64_to_cpu(findData.LastStatusChange));
- inode->i_mode = le64_to_cpu(findData.Permissions);
- /* since we set the inode type below we need to mask off
- to avoid strange results if bits set above */
- inode->i_mode &= ~S_IFMT;
- if (type == UNIX_FILE) {
- inode->i_mode |= S_IFREG;
- } else if (type == UNIX_SYMLINK) {
- inode->i_mode |= S_IFLNK;
- } else if (type == UNIX_DIR) {
- inode->i_mode |= S_IFDIR;
- } else if (type == UNIX_CHARDEV) {
- inode->i_mode |= S_IFCHR;
- inode->i_rdev = MKDEV(le64_to_cpu(findData.DevMajor),
- le64_to_cpu(findData.DevMinor) & MINORMASK);
- } else if (type == UNIX_BLOCKDEV) {
- inode->i_mode |= S_IFBLK;
- inode->i_rdev = MKDEV(le64_to_cpu(findData.DevMajor),
- le64_to_cpu(findData.DevMinor) & MINORMASK);
- } else if (type == UNIX_FIFO) {
- inode->i_mode |= S_IFIFO;
- } else if (type == UNIX_SOCKET) {
- inode->i_mode |= S_IFSOCK;
- } else {
- /* safest to call it a file if we do not know */
- inode->i_mode |= S_IFREG;
- cFYI(1, ("unknown type %d", type));
- }
+ cifs_unix_info_to_inode(inode, &findData, 0);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
- inode->i_uid = cifs_sb->mnt_uid;
- else
- inode->i_uid = le64_to_cpu(findData.Uid);
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
- inode->i_gid = cifs_sb->mnt_gid;
- else
- inode->i_gid = le64_to_cpu(findData.Gid);
-
- inode->i_nlink = le64_to_cpu(findData.Nlinks);
-
- spin_lock(&inode->i_lock);
- if (is_size_safe_to_change(cifsInfo, end_of_file)) {
- /* can not safely change the file size here if the
- client is writing to it due to potential races */
- i_size_write(inode, end_of_file);
-
- /* blksize needs to be multiple of two. So safer to default to
- blksize and blkbits set in superblock so 2**blkbits and blksize
- will match rather than setting to:
- (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
-
- /* This seems incredibly stupid but it turns out that i_blocks
- is not related to (i_size / i_blksize), instead 512 byte size
- is required for calculating num blocks */
-
- /* 512 bytes (2**9) is the fake blocksize that must be used */
- /* for this calculation */
- inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
- }
- spin_unlock(&inode->i_lock);
if (num_of_bytes < end_of_file)
cFYI(1, ("allocation size less than end of file"));
cFYI(1, ("Size %ld and blocks %llu",
(unsigned long) inode->i_size,
(unsigned long long)inode->i_blocks));
- if (S_ISREG(inode->i_mode)) {
- cFYI(1, ("File inode"));
- inode->i_op = &cifs_file_inode_ops;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- inode->i_fop =
- &cifs_file_direct_nobrl_ops;
- else
- inode->i_fop = &cifs_file_direct_ops;
- } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- inode->i_fop = &cifs_file_nobrl_ops;
- else /* not direct, send byte range locks */
- inode->i_fop = &cifs_file_ops;
-
- /* check if server can support readpages */
- if (pTcon->ses->server->maxBuf <
- PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
- inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
- else
- inode->i_data.a_ops = &cifs_addr_ops;
- } else if (S_ISDIR(inode->i_mode)) {
- cFYI(1, ("Directory inode"));
- inode->i_op = &cifs_dir_inode_ops;
- inode->i_fop = &cifs_dir_ops;
- } else if (S_ISLNK(inode->i_mode)) {
- cFYI(1, ("Symbolic Link inode"));
- inode->i_op = &cifs_symlink_inode_ops;
- /* tmp_inode->i_fop = */ /* do not need to set to anything */
- } else {
- cFYI(1, ("Init special inode"));
- init_special_inode(inode, inode->i_mode,
- inode->i_rdev);
- }
+
+ cifs_set_ops(inode);
}
return rc;
}
@@ -490,9 +517,9 @@ int cifs_get_inode_info(struct inode **pinode,
if (decode_sfu_inode(inode,
le64_to_cpu(pfindData->EndOfFile),
search_path,
- cifs_sb, xid)) {
+ cifs_sb, xid))
cFYI(1, ("Unrecognized sfu inode type"));
- }
+
cFYI(1, ("sfu mode 0%o", inode->i_mode));
} else {
inode->i_mode |= S_IFREG;
@@ -546,36 +573,7 @@ int cifs_get_inode_info(struct inode **pinode,
atomic_set(&cifsInfo->inUse, 1);
}
- if (S_ISREG(inode->i_mode)) {
- cFYI(1, ("File inode"));
- inode->i_op = &cifs_file_inode_ops;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- inode->i_fop =
- &cifs_file_direct_nobrl_ops;
- else
- inode->i_fop = &cifs_file_direct_ops;
- } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- inode->i_fop = &cifs_file_nobrl_ops;
- else /* not direct, send byte range locks */
- inode->i_fop = &cifs_file_ops;
-
- if (pTcon->ses->server->maxBuf <
- PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
- inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
- else
- inode->i_data.a_ops = &cifs_addr_ops;
- } else if (S_ISDIR(inode->i_mode)) {
- cFYI(1, ("Directory inode"));
- inode->i_op = &cifs_dir_inode_ops;
- inode->i_fop = &cifs_dir_ops;
- } else if (S_ISLNK(inode->i_mode)) {
- cFYI(1, ("Symbolic Link inode"));
- inode->i_op = &cifs_symlink_inode_ops;
- } else {
- init_special_inode(inode, inode->i_mode,
- inode->i_rdev);
- }
+ cifs_set_ops(inode);
}
kfree(buf);
return rc;
@@ -792,17 +790,12 @@ psx_del_no_retry:
}
static void posix_fill_in_inode(struct inode *tmp_inode,
- FILE_UNIX_BASIC_INFO *pData, int *pobject_type, int isNewInode)
+ FILE_UNIX_BASIC_INFO *pData, int isNewInode)
{
+ struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
loff_t local_size;
struct timespec local_mtime;
- struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
-
- __u32 type = le32_to_cpu(pData->Type);
- __u64 num_of_bytes = le64_to_cpu(pData->NumOfBytes);
- __u64 end_of_file = le64_to_cpu(pData->EndOfFile);
cifsInfo->time = jiffies;
atomic_inc(&cifsInfo->inUse);
@@ -810,115 +803,27 @@ static void posix_fill_in_inode(struct inode *tmp_inode,
local_mtime = tmp_inode->i_mtime;
local_size = tmp_inode->i_size;
- tmp_inode->i_atime =
- cifs_NTtimeToUnix(le64_to_cpu(pData->LastAccessTime));
- tmp_inode->i_mtime =
- cifs_NTtimeToUnix(le64_to_cpu(pData->LastModificationTime));
- tmp_inode->i_ctime =
- cifs_NTtimeToUnix(le64_to_cpu(pData->LastStatusChange));
-
- tmp_inode->i_mode = le64_to_cpu(pData->Permissions);
- /* since we set the inode type below we need to mask off type
- to avoid strange results if bits above were corrupt */
- tmp_inode->i_mode &= ~S_IFMT;
- if (type == UNIX_FILE) {
- *pobject_type = DT_REG;
- tmp_inode->i_mode |= S_IFREG;
- } else if (type == UNIX_SYMLINK) {
- *pobject_type = DT_LNK;
- tmp_inode->i_mode |= S_IFLNK;
- } else if (type == UNIX_DIR) {
- *pobject_type = DT_DIR;
- tmp_inode->i_mode |= S_IFDIR;
- } else if (type == UNIX_CHARDEV) {
- *pobject_type = DT_CHR;
- tmp_inode->i_mode |= S_IFCHR;
- tmp_inode->i_rdev = MKDEV(le64_to_cpu(pData->DevMajor),
- le64_to_cpu(pData->DevMinor) & MINORMASK);
- } else if (type == UNIX_BLOCKDEV) {
- *pobject_type = DT_BLK;
- tmp_inode->i_mode |= S_IFBLK;
- tmp_inode->i_rdev = MKDEV(le64_to_cpu(pData->DevMajor),
- le64_to_cpu(pData->DevMinor) & MINORMASK);
- } else if (type == UNIX_FIFO) {
- *pobject_type = DT_FIFO;
- tmp_inode->i_mode |= S_IFIFO;
- } else if (type == UNIX_SOCKET) {
- *pobject_type = DT_SOCK;
- tmp_inode->i_mode |= S_IFSOCK;
- } else {
- /* safest to just call it a file */
- *pobject_type = DT_REG;
- tmp_inode->i_mode |= S_IFREG;
- cFYI(1, ("unknown inode type %d", type));
- }
-
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("object type: %d", type));
-#endif
- tmp_inode->i_uid = le64_to_cpu(pData->Uid);
- tmp_inode->i_gid = le64_to_cpu(pData->Gid);
- tmp_inode->i_nlink = le64_to_cpu(pData->Nlinks);
-
- spin_lock(&tmp_inode->i_lock);
- if (is_size_safe_to_change(cifsInfo, end_of_file)) {
- /* can not safely change the file size here if the
- client is writing to it due to potential races */
- i_size_write(tmp_inode, end_of_file);
+ cifs_unix_info_to_inode(tmp_inode, pData, 1);
+ cifs_set_ops(tmp_inode);
- /* 512 bytes (2**9) is the fake blocksize that must be used */
- /* for this calculation, not the real blocksize */
- tmp_inode->i_blocks = (512 - 1 + num_of_bytes) >> 9;
- }
- spin_unlock(&tmp_inode->i_lock);
-
- if (S_ISREG(tmp_inode->i_mode)) {
- cFYI(1, ("File inode"));
- tmp_inode->i_op = &cifs_file_inode_ops;
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- tmp_inode->i_fop = &cifs_file_direct_nobrl_ops;
- else
- tmp_inode->i_fop = &cifs_file_direct_ops;
+ if (!S_ISREG(tmp_inode->i_mode))
+ return;
- } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
- tmp_inode->i_fop = &cifs_file_nobrl_ops;
- else
- tmp_inode->i_fop = &cifs_file_ops;
-
- if ((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
- (cifs_sb->tcon->ses->server->maxBuf <
- PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
- tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
- else
- tmp_inode->i_data.a_ops = &cifs_addr_ops;
-
- if (isNewInode)
- return; /* No sense invalidating pages for new inode
- since we we have not started caching
- readahead file data yet */
+ /*
+ * No sense invalidating pages for new inode
+ * since we have not started caching
+ * readahead file data yet.
+ */
+ if (isNewInode)
+ return;
- if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
- (local_size == tmp_inode->i_size)) {
- cFYI(1, ("inode exists but unchanged"));
- } else {
- /* file may have changed on server */
- cFYI(1, ("invalidate inode, readdir detected change"));
- invalidate_remote_inode(tmp_inode);
- }
- } else if (S_ISDIR(tmp_inode->i_mode)) {
- cFYI(1, ("Directory inode"));
- tmp_inode->i_op = &cifs_dir_inode_ops;
- tmp_inode->i_fop = &cifs_dir_ops;
- } else if (S_ISLNK(tmp_inode->i_mode)) {
- cFYI(1, ("Symbolic Link inode"));
- tmp_inode->i_op = &cifs_symlink_inode_ops;
-/* tmp_inode->i_fop = *//* do not need to set to anything */
+ if (timespec_equal(&tmp_inode->i_mtime, &local_mtime) &&
+ (local_size == tmp_inode->i_size)) {
+ cFYI(1, ("inode exists but unchanged"));
} else {
- cFYI(1, ("Special inode"));
- init_special_inode(tmp_inode, tmp_inode->i_mode,
- tmp_inode->i_rdev);
+ /* file may have changed on server */
+ cFYI(1, ("invalidate inode, readdir detected change"));
+ invalidate_remote_inode(tmp_inode);
}
}
@@ -968,7 +873,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
cFYI(1, ("posix mkdir returned 0x%x", rc));
d_drop(direntry);
} else {
- int obj_type;
if (pInfo->Type == cpu_to_le32(-1)) {
/* no return info, go query for it */
kfree(pInfo);
@@ -1004,7 +908,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
/* we already checked in POSIXCreate whether
frame was long enough */
posix_fill_in_inode(direntry->d_inode,
- pInfo, &obj_type, 1 /* NewInode */);
+ pInfo, 1 /* NewInode */);
#ifdef CONFIG_CIFS_DEBUG2
cFYI(1, ("instantiated dentry %p %s to inode %p",
direntry, direntry->d_name.name, newinode));
@@ -1214,9 +1118,8 @@ int cifs_rename(struct inode *source_inode, struct dentry *source_direntry,
} /* if we can not get memory just leave rc as EEXIST */
}
- if (rc) {
+ if (rc)
cFYI(1, ("rename rc %d", rc));
- }
if ((rc == -EIO) || (rc == -EEXIST)) {
int oplock = FALSE;
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index d24fe6880a0..5c792df13d6 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -30,7 +30,7 @@
#define CIFS_IOC_CHECKUMOUNT _IO(0xCF, 2)
-int cifs_ioctl (struct inode *inode, struct file *filep,
+int cifs_ioctl(struct inode *inode, struct file *filep,
unsigned int command, unsigned long arg)
{
int rc = -ENOTTY; /* strange error - but the precedent */
diff --git a/fs/cifs/md4.c b/fs/cifs/md4.c
index a2415c1a14d..a725c2609d6 100644
--- a/fs/cifs/md4.c
+++ b/fs/cifs/md4.c
@@ -56,7 +56,7 @@ lshift(__u32 x, int s)
/* this applies md4 to 64 byte chunks */
static void
-mdfour64(__u32 * M, __u32 * A, __u32 *B, __u32 * C, __u32 *D)
+mdfour64(__u32 *M, __u32 *A, __u32 *B, __u32 *C, __u32 *D)
{
int j;
__u32 AA, BB, CC, DD;
@@ -137,7 +137,7 @@ mdfour64(__u32 * M, __u32 * A, __u32 *B, __u32 * C, __u32 *D)
}
static void
-copy64(__u32 * M, unsigned char *in)
+copy64(__u32 *M, unsigned char *in)
{
int i;
diff --git a/fs/cifs/md5.c b/fs/cifs/md5.c
index f13f96d42fc..462bbfefd4b 100644
--- a/fs/cifs/md5.c
+++ b/fs/cifs/md5.c
@@ -161,7 +161,7 @@ MD5Final(unsigned char digest[16], struct MD5Context *ctx)
/* This is the central step in the MD5 algorithm. */
#define MD5STEP(f, w, x, y, z, data, s) \
- ( w += f(x, y, z) + data, w = w<<s | w>>(32-s), w += x )
+ (w += f(x, y, z) + data, w = w<<s | w>>(32-s), w += x)
/*
* The core of the MD5 algorithm, this alters an existing MD5 hash to
@@ -302,9 +302,8 @@ hmac_md5_init_limK_to_64(const unsigned char *key, int key_len,
int i;
/* if key is longer than 64 bytes truncate it */
- if (key_len > 64) {
+ if (key_len > 64)
key_len = 64;
- }
/* start out by storing key in pads */
memset(ctx->k_ipad, 0, sizeof(ctx->k_ipad));
@@ -359,9 +358,9 @@ hmac_md5(unsigned char key[16], unsigned char *data, int data_len,
{
struct HMACMD5Context ctx;
hmac_md5_init_limK_to_64(key, 16, &ctx);
- if (data_len != 0) {
+ if (data_len != 0)
hmac_md5_update(data, data_len, &ctx);
- }
+
hmac_md5_final(digest, &ctx);
}
#endif
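
The md5.c edits above are style-only, but the key handling they touch is worth seeing on its own: hmac_md5_init_limK_to_64 truncates keys past the 64-byte MD5 block (where RFC 2104 HMAC would hash an over-long key instead) and XORs the result into the inner and outer pads. A sketch of just that setup, using the standard 0x36/0x5c HMAC pad constants:

	#include <stdio.h>
	#include <string.h>

	#define MD5_BLOCK 64

	/* Build the HMAC inner/outer pads from a key, truncating past 64
	 * bytes as hmac_md5_init_limK_to_64 does. */
	static void hmac_pads(const unsigned char *key, int key_len,
			      unsigned char k_ipad[MD5_BLOCK],
			      unsigned char k_opad[MD5_BLOCK])
	{
		int i;

		if (key_len > MD5_BLOCK)	/* truncate, do not hash */
			key_len = MD5_BLOCK;

		memset(k_ipad, 0, MD5_BLOCK);
		memset(k_opad, 0, MD5_BLOCK);
		memcpy(k_ipad, key, key_len);
		memcpy(k_opad, key, key_len);

		for (i = 0; i < MD5_BLOCK; i++) {
			k_ipad[i] ^= 0x36;	/* inner pad */
			k_opad[i] ^= 0x5c;	/* outer pad */
		}
	}

	int main(void)
	{
		unsigned char ipad[MD5_BLOCK], opad[MD5_BLOCK];

		hmac_pads((const unsigned char *)"session-key", 11, ipad, opad);
		printf("ipad[0]=%02x opad[0]=%02x\n", ipad[0], opad[0]);
		return 0;
	}
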
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 15546c2354c..2a42d9fedbb 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/misc.c
*
- * Copyright (C) International Business Machines Corp., 2002,2007
+ * Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -320,9 +320,9 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
if (treeCon->ses) {
if (treeCon->ses->capabilities & CAP_UNICODE)
buffer->Flags2 |= SMBFLG2_UNICODE;
- if (treeCon->ses->capabilities & CAP_STATUS32) {
+ if (treeCon->ses->capabilities & CAP_STATUS32)
buffer->Flags2 |= SMBFLG2_ERR_STATUS;
- }
+
/* Uid is not converted */
buffer->Uid = treeCon->ses->Suid;
buffer->Mid = GetNextMid(treeCon->ses->server);
@@ -610,7 +610,8 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
buffer = (unsigned char *) smb_buf;
for (i = 0, j = 0; i < smb_buf_length; i++, j++) {
- if (i % 8 == 0) { /* have reached the beginning of line */
+ if (i % 8 == 0) {
+ /* have reached the beginning of line */
printk(KERN_DEBUG "| ");
j = 0;
}
@@ -621,7 +622,8 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
else
debug_line[1 + (2 * j)] = '_';
- if (i % 8 == 7) { /* reached end of line, time to print ascii */
+ if (i % 8 == 7) {
+ /* reached end of line, time to print ascii */
debug_line[16] = 0;
printk(" | %s\n", debug_line);
}
@@ -631,7 +633,7 @@ dump_smb(struct smb_hdr *smb_buf, int smb_buf_length)
debug_line[2 * j] = ' ';
debug_line[1 + (2 * j)] = ' ';
}
- printk( " | %s\n", debug_line);
+ printk(" | %s\n", debug_line);
return;
}
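
dump_smb above emits eight bytes per line with a printable-ASCII gutter, substituting '_' for non-printable bytes. A compact user-space equivalent of the same layout:

	#include <ctype.h>
	#include <stdio.h>

	/* Hex dump in dump_smb's layout: 8 bytes per row, ASCII gutter,
	 * non-printable bytes shown as '_'. */
	static void dump_buf(const unsigned char *buf, int len)
	{
		char ascii[9];
		int i, j = 0;

		for (i = 0; i < len; i++, j++) {
			if (i % 8 == 0) {		/* beginning of a line */
				printf("| ");
				j = 0;
			}
			printf("%02x ", buf[i]);
			ascii[j] = isprint(buf[i]) ? buf[i] : '_';
			if (i % 8 == 7) {		/* end of line: print ascii */
				ascii[8] = '\0';
				printf("| %s\n", ascii);
			}
		}
		if (i % 8 != 0) {			/* pad the final short row */
			for (; j < 8; j++) {
				printf("   ");
				ascii[j] = ' ';
			}
			ascii[8] = '\0';
			printf("| %s\n", ascii);
		}
	}

	int main(void)
	{
		dump_buf((const unsigned char *)"\xffSMB\x72\x00\x01\x02hello", 13);
		return 0;
	}
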
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 646e1f06941..3b5a5ce882b 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/netmisc.c
*
- * Copyright (c) International Business Machines Corp., 2002
+ * Copyright (c) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
* Error mapping routines from Samba libsmb/errormap.c
@@ -150,9 +150,7 @@ static int canonicalize_unc(char *cp)
if (cp[i] == '\\')
break;
if (cp[i] == '/') {
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("change slash to backslash in malformed UNC"));
-#endif
+ cFYI(DBG2, ("change slash to \\ in malformed UNC"));
cp[i] = '\\';
return 1;
}
@@ -178,9 +176,7 @@ cifs_inet_pton(int address_family, char *cp, void *dst)
} else if (address_family == AF_INET6) {
ret = in6_pton(cp, -1 /* len */, dst , '\\', NULL);
}
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("address conversion returned %d for %s", ret, cp));
-#endif
+ cFYI(DBG2, ("address conversion returned %d for %s", ret, cp));
if (ret > 0)
ret = 1;
return ret;
@@ -253,7 +249,8 @@ static const struct {
ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_MIX}, {
ERRHRD, ERRgeneral, NT_STATUS_INVALID_QUOTA_LOWER}, {
ERRHRD, ERRgeneral, NT_STATUS_DISK_CORRUPT_ERROR}, {
- ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_INVALID}, { /* mapping changed since shell does lookup on * and expects file not found */
+ /* mapping changed since shell does lookup on * and expects FileNotFound */
+ ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_INVALID}, {
ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_NOT_FOUND}, {
ERRDOS, ERRalreadyexists, NT_STATUS_OBJECT_NAME_COLLISION}, {
ERRHRD, ERRgeneral, NT_STATUS_HANDLE_NOT_WAITABLE}, {
@@ -820,7 +817,8 @@ map_smb_to_linux_error(struct smb_hdr *smb, int logErr)
/* old style errors */
/* DOS class smb error codes - map DOS */
- if (smberrclass == ERRDOS) { /* 1 byte field no need to byte reverse */
+ if (smberrclass == ERRDOS) {
+ /* 1 byte field no need to byte reverse */
for (i = 0;
i <
sizeof(mapping_table_ERRDOS) /
@@ -834,7 +832,8 @@ map_smb_to_linux_error(struct smb_hdr *smb, int logErr)
}
/* else try next error mapping one to see if match */
}
- } else if (smberrclass == ERRSRV) { /* server class of error codes */
+ } else if (smberrclass == ERRSRV) {
+ /* server class of error codes */
for (i = 0;
i <
sizeof(mapping_table_ERRSRV) /
@@ -922,8 +921,8 @@ struct timespec cnvrtDosUnixTm(__u16 date, __u16 time)
{
struct timespec ts;
int sec, min, days, month, year;
- SMB_TIME * st = (SMB_TIME *)&time;
- SMB_DATE * sd = (SMB_DATE *)&date;
+ SMB_TIME *st = (SMB_TIME *)&time;
+ SMB_DATE *sd = (SMB_DATE *)&date;
cFYI(1, ("date %d time %d", date, time));
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 0f22def4bdf..32b445edc88 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -3,7 +3,7 @@
*
* Directory search handling
*
- * Copyright (C) International Business Machines Corp., 2004, 2007
+ * Copyright (C) International Business Machines Corp., 2004, 2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@@ -42,17 +42,18 @@ static void dump_cifs_file_struct(struct file *file, char *label)
cFYI(1, ("empty cifs private file data"));
return;
}
- if (cf->invalidHandle) {
+ if (cf->invalidHandle)
cFYI(1, ("invalid handle"));
- }
- if (cf->srch_inf.endOfSearch) {
+ if (cf->srch_inf.endOfSearch)
cFYI(1, ("end of search"));
- }
- if (cf->srch_inf.emptyDir) {
+ if (cf->srch_inf.emptyDir)
cFYI(1, ("empty dir"));
- }
}
}
+#else
+static inline void dump_cifs_file_struct(struct file *file, char *label)
+{
+}
#endif /* DEBUG2 */
/* Returns one if new inode created (which therefore needs to be hashed) */
@@ -150,7 +151,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
} else { /* legacy, OS2 and DOS style */
/* struct timespec ts;*/
- FIND_FILE_STANDARD_INFO * pfindData =
+ FIND_FILE_STANDARD_INFO *pfindData =
(FIND_FILE_STANDARD_INFO *)buf;
tmp_inode->i_mtime = cnvrtDosUnixTm(
@@ -198,9 +199,8 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
if (attr & ATTR_DIRECTORY) {
*pobject_type = DT_DIR;
/* override default perms since we do not lock dirs */
- if (atomic_read(&cifsInfo->inUse) == 0) {
+ if (atomic_read(&cifsInfo->inUse) == 0)
tmp_inode->i_mode = cifs_sb->mnt_dir_mode;
- }
tmp_inode->i_mode |= S_IFDIR;
} else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) &&
(attr & ATTR_SYSTEM)) {
@@ -231,9 +231,8 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
} /* could add code here - to validate if device or weird share type? */
/* can not fill in nlink here as in qpathinfo version and Unx search */
- if (atomic_read(&cifsInfo->inUse) == 0) {
+ if (atomic_read(&cifsInfo->inUse) == 0)
atomic_set(&cifsInfo->inUse, 1);
- }
spin_lock(&tmp_inode->i_lock);
if (is_size_safe_to_change(cifsInfo, end_of_file)) {
@@ -461,9 +460,8 @@ static int initiate_cifs_search(const int xid, struct file *file)
full_path = build_path_from_dentry(file->f_path.dentry);
- if (full_path == NULL) {
+ if (full_path == NULL)
return -ENOMEM;
- }
cFYI(1, ("Full path: %s start at: %lld", full_path, file->f_pos));
@@ -471,9 +469,9 @@ ffirst_retry:
/* test for Unix extensions */
/* but now check for them on the share/mount not on the SMB session */
/* if (pTcon->ses->capabilities & CAP_UNIX) { */
- if (pTcon->unix_ext) {
+ if (pTcon->unix_ext)
cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX;
- } else if ((pTcon->ses->capabilities &
+ else if ((pTcon->ses->capabilities &
(CAP_NT_SMBS | CAP_NT_FIND)) == 0) {
cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD;
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
@@ -514,10 +512,10 @@ static int cifs_unicode_bytelen(char *str)
static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
{
char *new_entry;
- FILE_DIRECTORY_INFO * pDirInfo = (FILE_DIRECTORY_INFO *)old_entry;
+ FILE_DIRECTORY_INFO *pDirInfo = (FILE_DIRECTORY_INFO *)old_entry;
if (level == SMB_FIND_FILE_INFO_STANDARD) {
- FIND_FILE_STANDARD_INFO * pfData;
+ FIND_FILE_STANDARD_INFO *pfData;
pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo;
new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
@@ -553,7 +551,7 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
int len = 0;
if (cfile->srch_inf.info_level == SMB_FIND_FILE_UNIX) {
- FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
+ FILE_UNIX_INFO *pFindData = (FILE_UNIX_INFO *)current_entry;
filename = &pFindData->FileName[0];
if (cfile->srch_inf.unicode) {
len = cifs_unicode_bytelen(filename);
@@ -562,30 +560,30 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
len = strnlen(filename, 5);
}
} else if (cfile->srch_inf.info_level == SMB_FIND_FILE_DIRECTORY_INFO) {
- FILE_DIRECTORY_INFO * pFindData =
+ FILE_DIRECTORY_INFO *pFindData =
(FILE_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
} else if (cfile->srch_inf.info_level ==
SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
- FILE_FULL_DIRECTORY_INFO * pFindData =
+ FILE_FULL_DIRECTORY_INFO *pFindData =
(FILE_FULL_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
} else if (cfile->srch_inf.info_level ==
SMB_FIND_FILE_ID_FULL_DIR_INFO) {
- SEARCH_ID_FULL_DIR_INFO * pFindData =
+ SEARCH_ID_FULL_DIR_INFO *pFindData =
(SEARCH_ID_FULL_DIR_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
} else if (cfile->srch_inf.info_level ==
SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
- FILE_BOTH_DIRECTORY_INFO * pFindData =
+ FILE_BOTH_DIRECTORY_INFO *pFindData =
(FILE_BOTH_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
} else if (cfile->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) {
- FIND_FILE_STANDARD_INFO * pFindData =
+ FIND_FILE_STANDARD_INFO *pFindData =
(FIND_FILE_STANDARD_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = pFindData->FileNameLength;
@@ -666,9 +664,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
. and .. for the root of a drive and for those we need
to start two entries earlier */
-#ifdef CONFIG_CIFS_DEBUG2
dump_cifs_file_struct(file, "In fce ");
-#endif
if (((index_to_find < cifsFile->srch_inf.index_of_last_entry) &&
is_dir_changed(file)) ||
(index_to_find < first_entry_in_buffer)) {
@@ -718,7 +714,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
pos_in_buf = index_to_find - first_entry_in_buffer;
cFYI(1, ("found entry - pos_in_buf %d", pos_in_buf));
- for (i=0; (i < (pos_in_buf)) && (current_entry != NULL); i++) {
+ for (i = 0; (i < (pos_in_buf)) && (current_entry != NULL); i++) {
/* go entry by entry figuring out which is first */
current_entry = nxt_dir_entry(current_entry, end_of_smb,
cifsFile->srch_inf.info_level);
@@ -793,7 +789,7 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
} else if (level == SMB_FIND_FILE_INFO_STANDARD) {
- FIND_FILE_STANDARD_INFO * pFindData =
+ FIND_FILE_STANDARD_INFO *pFindData =
(FIND_FILE_STANDARD_INFO *)current_entry;
filename = &pFindData->FileName[0];
/* one byte length, no name conversion */
@@ -928,7 +924,7 @@ static int cifs_save_resume_key(const char *current_entry,
level = cifsFile->srch_inf.info_level;
if (level == SMB_FIND_FILE_UNIX) {
- FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
+ FILE_UNIX_INFO *pFindData = (FILE_UNIX_INFO *)current_entry;
filename = &pFindData->FileName[0];
if (cifsFile->srch_inf.unicode) {
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index d2153abcba6..ed150efbe27 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -417,10 +417,6 @@ CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
calc_lanman_hash(ses, lnm_session_key);
ses->flags |= CIFS_SES_LANMAN;
-/* #ifdef CONFIG_CIFS_DEBUG2
- cifs_dump_mem("cryptkey: ",ses->server->cryptKey,
- CIFS_SESS_KEY_SIZE);
-#endif */
memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE);
bcc_ptr += CIFS_SESS_KEY_SIZE;
diff --git a/fs/cifs/smbdes.c b/fs/cifs/smbdes.c
index cfa6d21fb4e..04943c976f9 100644
--- a/fs/cifs/smbdes.c
+++ b/fs/cifs/smbdes.c
@@ -114,42 +114,42 @@ static uchar sbox[8][4][16] = {
{{14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7},
{0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8},
{4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0},
- {15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13}},
+ {15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13} },
{{15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10},
{3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5},
{0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15},
- {13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9}},
+ {13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9} },
{{10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8},
{13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1},
{13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7},
- {1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12}},
+ {1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12} },
{{7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15},
{13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9},
{10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4},
- {3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14}},
+ {3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14} },
{{2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9},
{14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6},
{4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14},
- {11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3}},
+ {11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3} },
{{12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11},
{10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8},
{9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6},
- {4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13}},
+ {4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13} },
{{4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1},
{13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6},
{1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2},
- {6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12}},
+ {6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12} },
{{13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7},
{1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2},
{7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8},
- {2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11}}
+ {2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11} }
};
static void
@@ -313,9 +313,8 @@ str_to_key(unsigned char *str, unsigned char *key)
key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6);
key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7);
key[7] = str[6] & 0x7F;
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++)
key[i] = (key[i] << 1);
- }
}
static void
@@ -344,9 +343,8 @@ smbhash(unsigned char *out, unsigned char *in, unsigned char *key, int forw)
dohash(outb, inb, keyb, forw);
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++)
out[i] = 0;
- }
for (i = 0; i < 64; i++) {
if (outb[i])
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 50b623ad932..3612d6c0a0b 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/transport.c
*
- * Copyright (C) International Business Machines Corp., 2002,2007
+ * Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
* Jeremy Allison (jra@samba.org) 2006.
*
@@ -358,9 +358,9 @@ static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf,
} else if (ses->status != CifsGood) {
/* check if SMB session is bad because we are setting it up */
if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
- (in_buf->Command != SMB_COM_NEGOTIATE)) {
+ (in_buf->Command != SMB_COM_NEGOTIATE))
return -EAGAIN;
- } /* else ok - we are setting up session */
+ /* else ok - we are setting up session */
}
*ppmidQ = AllocMidQEntry(in_buf, ses);
if (*ppmidQ == NULL)
@@ -437,9 +437,8 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses,
iov[0].iov_len = in_buf->smb_buf_length + 4;
flags |= CIFS_NO_RESP;
rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags);
-#ifdef CONFIG_CIFS_DEBUG2
- cFYI(1, ("SendRcvNoR flags %d rc %d", flags, rc));
-#endif
+ cFYI(DBG2, ("SendRcvNoRsp flags %d rc %d", flags, rc));
+
return rc;
}
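
The cFYI(DBG2, ...) form replaces the open-coded #ifdef around the debug message. This assumes the DBG2 helper added to fs/cifs/cifs_debug.h elsewhere in this series; a sketch of the assumed macro shape (the exact cFYI gating lives in that header):

    #ifdef CONFIG_CIFS_DEBUG2
    #define DBG2 2          /* message printed only when extra debugging is on */
    #else
    #define DBG2 0          /* still compiled, but cFYI(0, ...) never prints */
    #endif

The win over the old #ifdef is that the printk arguments stay visible to the compiler and are type-checked even in non-debug builds.
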
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 54e8ef96cb7..8cd6a445b01 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -139,9 +139,9 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
} else if (strncmp(ea_name, CIFS_XATTR_USER_PREFIX, 5) == 0) {
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
goto set_ea_exit;
- if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) {
+ if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0)
cFYI(1, ("attempt to set cifs inode metadata"));
- }
+
ea_name += 5; /* skip past user. prefix */
rc = CIFSSMBSetEA(xid, pTcon, full_path, ea_name, ea_value,
(__u16)value_size, cifs_sb->local_nls,
@@ -262,7 +262,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
#ifdef CONFIG_CIFS_EXPERIMENTAL
- else if(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
+ else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
__u16 fid;
int oplock = FALSE;
struct cifs_ntsd *pacl = NULL;
@@ -303,11 +303,10 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
} else if (strncmp(ea_name,
CIFS_XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) {
cFYI(1, ("Security xattr namespace not supported yet"));
- } else {
+ } else
cFYI(1,
("illegal xattr request %s (only user namespace supported)",
ea_name));
- }
/* We could add an additional check for streams ie
if proc/fs/cifs/streamstoxattr is set then
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 468805d40e2..2d563979cb0 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -32,6 +32,7 @@
#include <linux/interrupt.h>
#include <linux/swap.h>
#include <linux/slab.h>
+#include <linux/genhd.h>
#include <linux/smp.h>
#include <linux/signal.h>
#include <linux/module.h>
@@ -377,7 +378,6 @@ static int stram_read_proc(char *page, char **start, off_t off,
#endif
#ifdef CONFIG_BLOCK
-extern const struct seq_operations partitions_op;
static int partitions_open(struct inode *inode, struct file *file)
{
return seq_open(file, &partitions_op);
@@ -389,7 +389,6 @@ static const struct file_operations proc_partitions_operations = {
.release = seq_release,
};
-extern const struct seq_operations diskstats_op;
static int diskstats_open(struct inode *inode, struct file *file)
{
return seq_open(file, &diskstats_op);
diff --git a/fs/splice.c b/fs/splice.c
index 9b559ee711a..0670c915cd3 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1669,6 +1669,13 @@ static int link_pipe(struct pipe_inode_info *ipipe,
i++;
} while (len);
+ /*
+ * return EAGAIN if we have the potential of some data in the
+ * future, otherwise just return 0
+ */
+ if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
+ ret = -EAGAIN;
+
inode_double_unlock(ipipe->inode, opipe->inode);
/*
@@ -1709,11 +1716,8 @@ static long do_tee(struct file *in, struct file *out, size_t len,
ret = link_ipipe_prep(ipipe, flags);
if (!ret) {
ret = link_opipe_prep(opipe, flags);
- if (!ret) {
+ if (!ret)
ret = link_pipe(ipipe, opipe, len, flags);
- if (!ret && (flags & SPLICE_F_NONBLOCK))
- ret = -EAGAIN;
- }
}
}
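
The link_pipe() change moves the EAGAIN decision into the copy loop itself: a non-blocking tee(2) now fails with EAGAIN only when nothing was duplicated but a writer is still waiting on the input pipe, and returns 0 once no more data can arrive. A minimal userspace sketch of consuming those semantics (names and the INT_MAX length are illustrative, not from this patch):

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <limits.h>
    #include <stdio.h>

    /* Duplicate everything currently readable on in_fd onto out_fd
     * without consuming it from in_fd. */
    static int tee_all(int in_fd, int out_fd)
    {
            for (;;) {
                    ssize_t n = tee(in_fd, out_fd, INT_MAX, SPLICE_F_NONBLOCK);

                    if (n > 0)
                            continue;   /* duplicated n bytes, try for more */
                    if (n == 0)
                            return 0;   /* no writers left: no data will arrive */
                    if (errno == EAGAIN)
                            return 1;   /* pipe empty now, but a writer remains */
                    perror("tee");
                    return -1;
            }
    }
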
diff --git a/include/asm-m68k/unistd.h b/include/asm-m68k/unistd.h
index 87f77b11931..e72ba563f10 100644
--- a/include/asm-m68k/unistd.h
+++ b/include/asm-m68k/unistd.h
@@ -320,13 +320,15 @@
#define __NR_epoll_pwait 315
#define __NR_utimensat 316
#define __NR_signalfd 317
-#define __NR_timerfd 318
+#define __NR_timerfd_create 318
#define __NR_eventfd 319
#define __NR_fallocate 320
+#define __NR_timerfd_settime 321
+#define __NR_timerfd_gettime 322
#ifdef __KERNEL__
-#define NR_syscalls 321
+#define NR_syscalls 323
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
diff --git a/include/asm-m68knommu/machdep.h b/include/asm-m68knommu/machdep.h
index 1cf26d240d8..de9f47a51cc 100644
--- a/include/asm-m68knommu/machdep.h
+++ b/include/asm-m68knommu/machdep.h
@@ -21,4 +21,6 @@ extern void (*mach_power_off)( void );
extern void config_BSP(char *command, int len);
+extern void do_IRQ(int irq, struct pt_regs *fp);
+
#endif /* _M68KNOMMU_MACHDEP_H */
diff --git a/include/asm-m68knommu/unistd.h b/include/asm-m68knommu/unistd.h
index 27c2f9bb4db..4ba98b9c5d7 100644
--- a/include/asm-m68knommu/unistd.h
+++ b/include/asm-m68knommu/unistd.h
@@ -321,13 +321,15 @@
#define __NR_epoll_pwait 315
#define __NR_utimensat 316
#define __NR_signalfd 317
-#define __NR_timerfd 318
+#define __NR_timerfd_create 318
#define __NR_eventfd 319
#define __NR_fallocate 320
+#define __NR_timerfd_settime 321
+#define __NR_timerfd_gettime 322
#ifdef __KERNEL__
-#define NR_syscalls 321
+#define NR_syscalls 323
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
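
Both m68k syscall tables pick up the interface split that turned the old sys_timerfd into timerfd_create()/timerfd_settime()/timerfd_gettime(). A minimal sketch of the three-call API from userspace (the glibc wrappers shown postdate this patch, so a contemporary caller may have needed syscall(2) directly):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/timerfd.h>
    #include <unistd.h>

    int main(void)
    {
            struct itimerspec its = {
                    .it_value    = { .tv_sec = 1 },  /* first expiry in 1s */
                    .it_interval = { .tv_sec = 1 },  /* then every second  */
            };
            uint64_t expirations;
            int fd = timerfd_create(CLOCK_MONOTONIC, 0);

            if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0)
                    return 1;

            /* Each read() yields the number of expirations since the last read. */
            if (read(fd, &expirations, sizeof(expirations)) == sizeof(expirations))
                    printf("timer fired %llu time(s)\n",
                           (unsigned long long)expirations);
            close(fd);
            return 0;
    }
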
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6fe67d1939c..6f79d40dd3c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -216,8 +216,8 @@ struct request {
unsigned int cmd_len;
unsigned char cmd[BLK_MAX_CDB];
- unsigned int raw_data_len;
unsigned int data_len;
+ unsigned int extra_len; /* length of alignment and padding */
unsigned int sense_len;
void *data;
void *sense;
@@ -362,6 +362,7 @@ struct request_queue
unsigned long seg_boundary_mask;
void *dma_drain_buffer;
unsigned int dma_drain_size;
+ unsigned int dma_pad_mask;
unsigned int dma_alignment;
struct blk_queue_tag *queue_tags;
@@ -701,6 +702,7 @@ extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
dma_drain_needed_fn *dma_drain_needed,
void *buf, unsigned int size);
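
blk_queue_dma_pad() lets a driver ask the block layer to pad the data length of requests up to an alignment boundary; the padding is accounted in the new extra_len field instead of inflating data_len. A hedged driver-side sketch (queue variable and mask value are illustrative; libata's ATAPI handling is the in-tree user this was added for):

    /* At queue setup: round transfer lengths up to 4-byte multiples.
     * The argument is a mask, i.e. alignment - 1. */
    blk_queue_dma_pad(q, 4 - 1);

    /* When programming the controller for request 'rq', the length put
     * on the wire includes the alignment/padding bytes: */
    unsigned int wire_len = rq->data_len + rq->extra_len;
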
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index acbb364674f..261e43a4c87 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -366,7 +366,7 @@ __dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
*/
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
- return chan->device->device_issue_pending(chan);
+ chan->device->device_issue_pending(chan);
}
#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 09a3b18918c..32c2ac49a07 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -18,11 +18,13 @@
#define dev_to_disk(device) container_of(device, struct gendisk, dev)
#define dev_to_part(device) container_of(device, struct hd_struct, dev)
-extern struct device_type disk_type;
extern struct device_type part_type;
extern struct kobject *block_depr;
extern struct class block_class;
+extern const struct seq_operations partitions_op;
+extern const struct seq_operations diskstats_op;
+
enum {
/* These three have identical behaviour; use the second one if DOS FDISK gets
confused about extended/logical partitions starting past cylinder 1023. */
@@ -556,7 +558,6 @@ extern struct gendisk *alloc_disk_node(int minors, int node_id);
extern struct gendisk *alloc_disk(int minors);
extern struct kobject *get_disk(struct gendisk *disk);
extern void put_disk(struct gendisk *disk);
-extern void genhd_media_change_notify(struct gendisk *disk);
extern void blk_register_region(dev_t devt, unsigned long range,
struct module *module,
struct kobject *(*probe)(dev_t, int *, void *),
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 4de4fd2d860..c1ec04fd000 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -221,6 +221,7 @@ struct kvm_vapic_addr {
* Get size for mmap(vcpu_fd)
*/
#define KVM_GET_VCPU_MMAP_SIZE _IO(KVMIO, 0x04) /* in bytes */
+#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
/*
* Extension capability list.
@@ -230,8 +231,8 @@ struct kvm_vapic_addr {
#define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2
#define KVM_CAP_USER_MEMORY 3
#define KVM_CAP_SET_TSS_ADDR 4
-#define KVM_CAP_EXT_CPUID 5
#define KVM_CAP_VAPIC 6
+#define KVM_CAP_EXT_CPUID 7
/*
* ioctls for VM fds
@@ -249,7 +250,6 @@ struct kvm_vapic_addr {
#define KVM_CREATE_VCPU _IO(KVMIO, 0x41)
#define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log)
#define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias)
-#define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x48, struct kvm_cpuid2)
/* Device model IOC */
#define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
#define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level)
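
With the move from 0x48 to 0x05, KVM_GET_SUPPORTED_CPUID becomes a system ioctl, so userspace can query supported CPUID bits on /dev/kvm before creating any VM. A minimal userspace sketch (the initial entry count is an arbitrary guess; a real caller would retry with a larger buffer on E2BIG):

    #include <fcntl.h>
    #include <linux/kvm.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    int main(void)
    {
            int nent = 64;          /* arbitrary first guess */
            int kvm_fd = open("/dev/kvm", O_RDWR);
            struct kvm_cpuid2 *cpuid;

            if (kvm_fd < 0)
                    return 1;
            cpuid = calloc(1, sizeof(*cpuid) +
                              nent * sizeof(struct kvm_cpuid_entry2));
            cpuid->nent = nent;
            /* Issued on the /dev/kvm fd itself, not on a VM fd. */
            if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
                    printf("%u supported cpuid entries\n", cpuid->nent);
            free(cpuid);
            return 0;
    }
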
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ea4764b0a2f..928b0d59e9b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -107,6 +107,7 @@ struct kvm_memory_slot {
struct kvm {
struct mutex lock; /* protects the vcpus array and APIC accesses */
spinlock_t mmu_lock;
+ struct rw_semaphore slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
int nmemslots;
struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS +
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2c9621f8bf8..9ae4030067a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1542,10 +1542,6 @@ extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_features;
extern unsigned int sysctl_sched_migration_cost;
extern unsigned int sysctl_sched_nr_migrate;
-#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
-extern unsigned int sysctl_sched_min_bal_int_shares;
-extern unsigned int sysctl_sched_max_bal_int_shares;
-#endif
int sched_nr_latency_handler(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *length,
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 7c2118f9597..f1d0b345c9b 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -75,22 +75,15 @@ void refrigerator(void)
__set_current_state(save);
}
-static void fake_signal_wake_up(struct task_struct *p, int resume)
+static void fake_signal_wake_up(struct task_struct *p)
{
unsigned long flags;
spin_lock_irqsave(&p->sighand->siglock, flags);
- signal_wake_up(p, resume);
+ signal_wake_up(p, 0);
spin_unlock_irqrestore(&p->sighand->siglock, flags);
}
-static void send_fake_signal(struct task_struct *p)
-{
- if (task_is_stopped(p))
- force_sig_specific(SIGSTOP, p);
- fake_signal_wake_up(p, task_is_stopped(p));
-}
-
static int has_mm(struct task_struct *p)
{
return (p->mm && !(p->flags & PF_BORROWED_MM));
@@ -121,7 +114,7 @@ static int freeze_task(struct task_struct *p, int with_mm_only)
if (freezing(p)) {
if (has_mm(p)) {
if (!signal_pending(p))
- fake_signal_wake_up(p, 0);
+ fake_signal_wake_up(p);
} else {
if (with_mm_only)
ret = 0;
@@ -135,7 +128,7 @@ static int freeze_task(struct task_struct *p, int with_mm_only)
} else {
if (has_mm(p)) {
set_freeze_flag(p);
- send_fake_signal(p);
+ fake_signal_wake_up(p);
} else {
if (with_mm_only) {
ret = 0;
@@ -182,15 +175,17 @@ static int try_to_freeze_tasks(int freeze_user_space)
if (frozen(p) || !freezeable(p))
continue;
- if (task_is_traced(p) && frozen(p->parent)) {
- cancel_freezing(p);
- continue;
- }
-
if (!freeze_task(p, freeze_user_space))
continue;
- if (!freezer_should_skip(p))
+ /*
+ * Now that we've done set_freeze_flag, don't
+ * perturb a task in TASK_STOPPED or TASK_TRACED.
+ * It is "frozen enough". If the task does wake
+ * up, it will immediately call try_to_freeze.
+ */
+ if (!task_is_stopped_or_traced(p) &&
+ !freezer_should_skip(p))
todo++;
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
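
With send_fake_signal() folded into fake_signal_wake_up(), the freezer stops poking stopped tasks with SIGSTOP and instead counts TASK_STOPPED/TASK_TRACED tasks as frozen enough; the signal.c hunks below make such tasks freeze promptly once they do wake. Kernel threads keep opting in through the usual pattern, sketched here for context (the loop body is illustrative):

    static int my_kthread(void *unused)
    {
            set_freezable();        /* kthreads are unfreezable by default */

            while (!kthread_should_stop()) {
                    try_to_freeze();        /* park here across suspend */

                    /* ... one unit of work ... */

                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }
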
diff --git a/kernel/sched.c b/kernel/sched.c
index f06950c8a6c..dcd553cc4ee 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -174,41 +174,6 @@ struct task_group {
struct sched_entity **se;
/* runqueue "owned" by this group on each cpu */
struct cfs_rq **cfs_rq;
-
- /*
- * shares assigned to a task group governs how much of cpu bandwidth
- * is allocated to the group. The more shares a group has, the more is
- * the cpu bandwidth allocated to it.
- *
- * For ex, lets say that there are three task groups, A, B and C which
- * have been assigned shares 1000, 2000 and 3000 respectively. Then,
- * cpu bandwidth allocated by the scheduler to task groups A, B and C
- * should be:
- *
- * Bw(A) = 1000/(1000+2000+3000) * 100 = 16.66%
- * Bw(B) = 2000/(1000+2000+3000) * 100 = 33.33%
- * Bw(C) = 3000/(1000+2000+3000) * 100 = 50%
- *
- * The weight assigned to a task group's schedulable entities on every
- * cpu (task_group.se[a_cpu]->load.weight) is derived from the task
- * group's shares. For ex: lets say that task group A has been
- * assigned shares of 1000 and there are two CPUs in a system. Then,
- *
- * tg_A->se[0]->load.weight = tg_A->se[1]->load.weight = 1000;
- *
- * Note: It's not necessary that each of a task's group schedulable
- * entity have the same weight on all CPUs. If the group
- * has 2 of its tasks on CPU0 and 1 task on CPU1, then a
- * better distribution of weight could be:
- *
- * tg_A->se[0]->load.weight = 2/3 * 2000 = 1333
- * tg_A->se[1]->load.weight = 1/2 * 2000 = 667
- *
- * rebalance_shares() is responsible for distributing the shares of a
- * task groups like this among the group's schedulable entities across
- * cpus.
- *
- */
unsigned long shares;
#endif
@@ -250,22 +215,12 @@ static DEFINE_SPINLOCK(task_group_lock);
static DEFINE_MUTEX(doms_cur_mutex);
#ifdef CONFIG_FAIR_GROUP_SCHED
-#ifdef CONFIG_SMP
-/* kernel thread that runs rebalance_shares() periodically */
-static struct task_struct *lb_monitor_task;
-static int load_balance_monitor(void *unused);
-#endif
-
-static void set_se_shares(struct sched_entity *se, unsigned long shares);
-
#ifdef CONFIG_USER_SCHED
# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
#else
# define INIT_TASK_GROUP_LOAD NICE_0_LOAD
#endif
-#define MIN_GROUP_SHARES 2
-
static int init_task_group_load = INIT_TASK_GROUP_LOAD;
#endif
@@ -1245,16 +1200,6 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
#endif
-static inline void inc_cpu_load(struct rq *rq, unsigned long load)
-{
- update_load_add(&rq->load, load);
-}
-
-static inline void dec_cpu_load(struct rq *rq, unsigned long load)
-{
- update_load_sub(&rq->load, load);
-}
-
#ifdef CONFIG_SMP
static unsigned long source_load(int cpu, int type);
static unsigned long target_load(int cpu, int type);
@@ -1272,14 +1217,26 @@ static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
#define sched_class_highest (&rt_sched_class)
-static void inc_nr_running(struct rq *rq)
+static inline void inc_load(struct rq *rq, const struct task_struct *p)
+{
+ update_load_add(&rq->load, p->se.load.weight);
+}
+
+static inline void dec_load(struct rq *rq, const struct task_struct *p)
+{
+ update_load_sub(&rq->load, p->se.load.weight);
+}
+
+static void inc_nr_running(struct task_struct *p, struct rq *rq)
{
rq->nr_running++;
+ inc_load(rq, p);
}
-static void dec_nr_running(struct rq *rq)
+static void dec_nr_running(struct task_struct *p, struct rq *rq)
{
rq->nr_running--;
+ dec_load(rq, p);
}
static void set_load_weight(struct task_struct *p)
@@ -1371,7 +1328,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
rq->nr_uninterruptible--;
enqueue_task(rq, p, wakeup);
- inc_nr_running(rq);
+ inc_nr_running(p, rq);
}
/*
@@ -1383,7 +1340,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
rq->nr_uninterruptible++;
dequeue_task(rq, p, sleep);
- dec_nr_running(rq);
+ dec_nr_running(p, rq);
}
/**
@@ -2023,7 +1980,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
* management (if any):
*/
p->sched_class->task_new(rq, p);
- inc_nr_running(rq);
+ inc_nr_running(p, rq);
}
check_preempt_curr(rq, p);
#ifdef CONFIG_SMP
@@ -4362,8 +4319,10 @@ void set_user_nice(struct task_struct *p, long nice)
goto out_unlock;
}
on_rq = p->se.on_rq;
- if (on_rq)
+ if (on_rq) {
dequeue_task(rq, p, 0);
+ dec_load(rq, p);
+ }
p->static_prio = NICE_TO_PRIO(nice);
set_load_weight(p);
@@ -4373,6 +4332,7 @@ void set_user_nice(struct task_struct *p, long nice)
if (on_rq) {
enqueue_task(rq, p, 0);
+ inc_load(rq, p);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
@@ -7087,21 +7047,6 @@ void __init sched_init_smp(void)
if (set_cpus_allowed(current, non_isolated_cpus) < 0)
BUG();
sched_init_granularity();
-
-#ifdef CONFIG_FAIR_GROUP_SCHED
- if (nr_cpu_ids == 1)
- return;
-
- lb_monitor_task = kthread_create(load_balance_monitor, NULL,
- "group_balance");
- if (!IS_ERR(lb_monitor_task)) {
- lb_monitor_task->flags |= PF_NOFREEZE;
- wake_up_process(lb_monitor_task);
- } else {
- printk(KERN_ERR "Could not create load balance monitor thread"
- "(error = %ld) \n", PTR_ERR(lb_monitor_task));
- }
-#endif
}
#else
void __init sched_init_smp(void)
@@ -7424,157 +7369,6 @@ void set_curr_task(int cpu, struct task_struct *p)
#ifdef CONFIG_GROUP_SCHED
-#if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
-/*
- * distribute shares of all task groups among their schedulable entities,
- * to reflect load distribution across cpus.
- */
-static int rebalance_shares(struct sched_domain *sd, int this_cpu)
-{
- struct cfs_rq *cfs_rq;
- struct rq *rq = cpu_rq(this_cpu);
- cpumask_t sdspan = sd->span;
- int balanced = 1;
-
- /* Walk thr' all the task groups that we have */
- for_each_leaf_cfs_rq(rq, cfs_rq) {
- int i;
- unsigned long total_load = 0, total_shares;
- struct task_group *tg = cfs_rq->tg;
-
- /* Gather total task load of this group across cpus */
- for_each_cpu_mask(i, sdspan)
- total_load += tg->cfs_rq[i]->load.weight;
-
- /* Nothing to do if this group has no load */
- if (!total_load)
- continue;
-
- /*
- * tg->shares represents the number of cpu shares the task group
- * is eligible to hold on a single cpu. On N cpus, it is
- * eligible to hold (N * tg->shares) number of cpu shares.
- */
- total_shares = tg->shares * cpus_weight(sdspan);
-
- /*
- * redistribute total_shares across cpus as per the task load
- * distribution.
- */
- for_each_cpu_mask(i, sdspan) {
- unsigned long local_load, local_shares;
-
- local_load = tg->cfs_rq[i]->load.weight;
- local_shares = (local_load * total_shares) / total_load;
- if (!local_shares)
- local_shares = MIN_GROUP_SHARES;
- if (local_shares == tg->se[i]->load.weight)
- continue;
-
- spin_lock_irq(&cpu_rq(i)->lock);
- set_se_shares(tg->se[i], local_shares);
- spin_unlock_irq(&cpu_rq(i)->lock);
- balanced = 0;
- }
- }
-
- return balanced;
-}
-
-/*
- * How frequently should we rebalance_shares() across cpus?
- *
- * The more frequently we rebalance shares, the more accurate is the fairness
- * of cpu bandwidth distribution between task groups. However higher frequency
- * also implies increased scheduling overhead.
- *
- * sysctl_sched_min_bal_int_shares represents the minimum interval between
- * consecutive calls to rebalance_shares() in the same sched domain.
- *
- * sysctl_sched_max_bal_int_shares represents the maximum interval between
- * consecutive calls to rebalance_shares() in the same sched domain.
- *
- * These settings allows for the appropriate trade-off between accuracy of
- * fairness and the associated overhead.
- *
- */
-
-/* default: 8ms, units: milliseconds */
-const_debug unsigned int sysctl_sched_min_bal_int_shares = 8;
-
-/* default: 128ms, units: milliseconds */
-const_debug unsigned int sysctl_sched_max_bal_int_shares = 128;
-
-/* kernel thread that runs rebalance_shares() periodically */
-static int load_balance_monitor(void *unused)
-{
- unsigned int timeout = sysctl_sched_min_bal_int_shares;
- struct sched_param schedparm;
- int ret;
-
- /*
- * We don't want this thread's execution to be limited by the shares
- * assigned to default group (init_task_group). Hence make it run
- * as a SCHED_RR RT task at the lowest priority.
- */
- schedparm.sched_priority = 1;
- ret = sched_setscheduler(current, SCHED_RR, &schedparm);
- if (ret)
- printk(KERN_ERR "Couldn't set SCHED_RR policy for load balance"
- " monitor thread (error = %d) \n", ret);
-
- while (!kthread_should_stop()) {
- int i, cpu, balanced = 1;
-
- /* Prevent cpus going down or coming up */
- get_online_cpus();
- /* lockout changes to doms_cur[] array */
- lock_doms_cur();
- /*
- * Enter a rcu read-side critical section to safely walk rq->sd
- * chain on various cpus and to walk task group list
- * (rq->leaf_cfs_rq_list) in rebalance_shares().
- */
- rcu_read_lock();
-
- for (i = 0; i < ndoms_cur; i++) {
- cpumask_t cpumap = doms_cur[i];
- struct sched_domain *sd = NULL, *sd_prev = NULL;
-
- cpu = first_cpu(cpumap);
-
- /* Find the highest domain at which to balance shares */
- for_each_domain(cpu, sd) {
- if (!(sd->flags & SD_LOAD_BALANCE))
- continue;
- sd_prev = sd;
- }
-
- sd = sd_prev;
- /* sd == NULL? No load balance reqd in this domain */
- if (!sd)
- continue;
-
- balanced &= rebalance_shares(sd, cpu);
- }
-
- rcu_read_unlock();
-
- unlock_doms_cur();
- put_online_cpus();
-
- if (!balanced)
- timeout = sysctl_sched_min_bal_int_shares;
- else if (timeout < sysctl_sched_max_bal_int_shares)
- timeout *= 2;
-
- msleep_interruptible(timeout);
- }
-
- return 0;
-}
-#endif /* CONFIG_SMP */
-
#ifdef CONFIG_FAIR_GROUP_SCHED
static void free_fair_sched_group(struct task_group *tg)
{
@@ -7841,29 +7635,25 @@ void sched_move_task(struct task_struct *tsk)
}
#ifdef CONFIG_FAIR_GROUP_SCHED
-/* rq->lock to be locked by caller */
static void set_se_shares(struct sched_entity *se, unsigned long shares)
{
struct cfs_rq *cfs_rq = se->cfs_rq;
struct rq *rq = cfs_rq->rq;
int on_rq;
- if (!shares)
- shares = MIN_GROUP_SHARES;
+ spin_lock_irq(&rq->lock);
on_rq = se->on_rq;
- if (on_rq) {
+ if (on_rq)
dequeue_entity(cfs_rq, se, 0);
- dec_cpu_load(rq, se->load.weight);
- }
se->load.weight = shares;
se->load.inv_weight = div64_64((1ULL<<32), shares);
- if (on_rq) {
+ if (on_rq)
enqueue_entity(cfs_rq, se, 0);
- inc_cpu_load(rq, se->load.weight);
- }
+
+ spin_unlock_irq(&rq->lock);
}
static DEFINE_MUTEX(shares_mutex);
@@ -7873,18 +7663,18 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
int i;
unsigned long flags;
+ /*
+	 * A weight of 0 or 1 can cause arithmetic problems.
+ * (The default weight is 1024 - so there's no practical
+ * limitation from this.)
+ */
+ if (shares < 2)
+ shares = 2;
+
mutex_lock(&shares_mutex);
if (tg->shares == shares)
goto done;
- if (shares < MIN_GROUP_SHARES)
- shares = MIN_GROUP_SHARES;
-
- /*
- * Prevent any load balance activity (rebalance_shares,
- * load_balance_fair) from referring to this group first,
- * by taking it off the rq->leaf_cfs_rq_list on each cpu.
- */
spin_lock_irqsave(&task_group_lock, flags);
for_each_possible_cpu(i)
unregister_fair_sched_group(tg, i);
@@ -7898,11 +7688,8 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
* w/o tripping rebalance_share or load_balance_fair.
*/
tg->shares = shares;
- for_each_possible_cpu(i) {
- spin_lock_irq(&cpu_rq(i)->lock);
+ for_each_possible_cpu(i)
set_se_shares(tg->se[i], shares);
- spin_unlock_irq(&cpu_rq(i)->lock);
- }
/*
* Enable load balance activity on this group, by inserting it back on
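
The new lower bound on shares exists because set_se_shares() computes inv_weight as div64_64(1ULL << 32, shares): zero shares would divide by zero, and a share value of 1 produces 1ULL << 32, which truncates to 0 in a 32-bit inv_weight field. A small sketch of the failure mode (field width assumed from struct load_weight of this era):

    /* inv_weight exists so "x / weight" can run as "(x * inv_weight) >> 32" */
    unsigned long shares = 1;
    unsigned long inv_weight = (unsigned long)((1ULL << 32) / shares);
    /* On 32-bit, 0x100000000 truncates to 0, so every later "division by
     * weight" silently becomes a multiplication by zero. Clamping shares
     * to >= 2 keeps the quotient at most 0x80000000, which fits. */
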
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c8e6492c592..3df4d46994c 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -727,8 +727,6 @@ static inline struct sched_entity *parent_entity(struct sched_entity *se)
return se->parent;
}
-#define GROUP_IMBALANCE_PCT 20
-
#else /* CONFIG_FAIR_GROUP_SCHED */
#define for_each_sched_entity(se) \
@@ -819,26 +817,15 @@ hrtick_start_fair(struct rq *rq, struct task_struct *p)
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se,
- *topse = NULL; /* Highest schedulable entity */
- int incload = 1;
+ struct sched_entity *se = &p->se;
for_each_sched_entity(se) {
- topse = se;
- if (se->on_rq) {
- incload = 0;
+ if (se->on_rq)
break;
- }
cfs_rq = cfs_rq_of(se);
enqueue_entity(cfs_rq, se, wakeup);
wakeup = 1;
}
- /* Increment cpu load if we just enqueued the first task of a group on
- * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
- * at the highest grouping level.
- */
- if (incload)
- inc_cpu_load(rq, topse->load.weight);
hrtick_start_fair(rq, rq->curr);
}
@@ -851,28 +838,16 @@ static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
struct cfs_rq *cfs_rq;
- struct sched_entity *se = &p->se,
- *topse = NULL; /* Highest schedulable entity */
- int decload = 1;
+ struct sched_entity *se = &p->se;
for_each_sched_entity(se) {
- topse = se;
cfs_rq = cfs_rq_of(se);
dequeue_entity(cfs_rq, se, sleep);
/* Don't dequeue parent if it has other entities besides us */
- if (cfs_rq->load.weight) {
- if (parent_entity(se))
- decload = 0;
+ if (cfs_rq->load.weight)
break;
- }
sleep = 1;
}
- /* Decrement cpu load if we just dequeued the last task of a group on
- * 'rq->cpu'. 'topse' represents the group to which task 'p' belongs
- * at the highest grouping level.
- */
- if (decload)
- dec_cpu_load(rq, topse->load.weight);
hrtick_start_fair(rq, rq->curr);
}
@@ -1186,6 +1161,25 @@ static struct task_struct *load_balance_next_fair(void *arg)
return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
+{
+ struct sched_entity *curr;
+ struct task_struct *p;
+
+ if (!cfs_rq->nr_running || !first_fair(cfs_rq))
+ return MAX_PRIO;
+
+ curr = cfs_rq->curr;
+ if (!curr)
+ curr = __pick_next_entity(cfs_rq);
+
+ p = task_of(curr);
+
+ return p->prio;
+}
+#endif
+
static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
unsigned long max_load_move,
@@ -1195,45 +1189,28 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
struct cfs_rq *busy_cfs_rq;
long rem_load_move = max_load_move;
struct rq_iterator cfs_rq_iterator;
- unsigned long load_moved;
cfs_rq_iterator.start = load_balance_start_fair;
cfs_rq_iterator.next = load_balance_next_fair;
for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
- struct cfs_rq *this_cfs_rq = busy_cfs_rq->tg->cfs_rq[this_cpu];
- unsigned long maxload, task_load, group_weight;
- unsigned long thisload, per_task_load;
- struct sched_entity *se = busy_cfs_rq->tg->se[busiest->cpu];
-
- task_load = busy_cfs_rq->load.weight;
- group_weight = se->load.weight;
+ struct cfs_rq *this_cfs_rq;
+ long imbalance;
+ unsigned long maxload;
- /*
- * 'group_weight' is contributed by tasks of total weight
- * 'task_load'. To move 'rem_load_move' worth of weight only,
- * we need to move a maximum task load of:
- *
- * maxload = (remload / group_weight) * task_load;
- */
- maxload = (rem_load_move * task_load) / group_weight;
+ this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);
- if (!maxload || !task_load)
+ imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
+ /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
+ if (imbalance <= 0)
continue;
- per_task_load = task_load / busy_cfs_rq->nr_running;
- /*
- * balance_tasks will try to forcibly move atleast one task if
- * possible (because of SCHED_LOAD_SCALE_FUZZ). Avoid that if
- * maxload is less than GROUP_IMBALANCE_FUZZ% the per_task_load.
- */
- if (100 * maxload < GROUP_IMBALANCE_PCT * per_task_load)
- continue;
+ /* Don't pull more than imbalance/2 */
+ imbalance /= 2;
+ maxload = min(rem_load_move, imbalance);
- /* Disable priority-based load balance */
- *this_best_prio = 0;
- thisload = this_cfs_rq->load.weight;
+ *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else
# define maxload rem_load_move
#endif
@@ -1242,33 +1219,11 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
* load_balance_[start|next]_fair iterators
*/
cfs_rq_iterator.arg = busy_cfs_rq;
- load_moved = balance_tasks(this_rq, this_cpu, busiest,
+ rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
maxload, sd, idle, all_pinned,
this_best_prio,
&cfs_rq_iterator);
-#ifdef CONFIG_FAIR_GROUP_SCHED
- /*
- * load_moved holds the task load that was moved. The
- * effective (group) weight moved would be:
- * load_moved_eff = load_moved/task_load * group_weight;
- */
- load_moved = (group_weight * load_moved) / task_load;
-
- /* Adjust shares on both cpus to reflect load_moved */
- group_weight -= load_moved;
- set_se_shares(se, group_weight);
-
- se = busy_cfs_rq->tg->se[this_cpu];
- if (!thisload)
- group_weight = load_moved;
- else
- group_weight = se->load.weight + load_moved;
- set_se_shares(se, group_weight);
-#endif
-
- rem_load_move -= load_moved;
-
if (rem_load_move <= 0)
break;
}
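
The rewritten group balancing is purely local arithmetic: for each task group, compare the group's runqueue weight on the busiest CPU with its weight on this CPU and pull at most half the difference, instead of rebalancing shares globally. A worked sketch with example weights:

    /* busiest CPU runs 3 tasks of the group, this CPU runs 1 (weight 1024 each) */
    long busy_weight = 3 * 1024;    /* busy_cfs_rq->load.weight */
    long this_weight = 1 * 1024;    /* this_cfs_rq->load.weight */
    long imbalance   = busy_weight - this_weight;   /* 2048 */

    if (imbalance > 0) {
            imbalance /= 2;         /* pull at most 1024 of weight ... */
            unsigned long maxload = min(rem_load_move, (unsigned long)imbalance);
            /* ... i.e. one task, leaving both CPUs at weight 2048 */
    }
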
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index f54792b175b..76e82851754 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -393,8 +393,6 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
*/
for_each_sched_rt_entity(rt_se)
enqueue_rt_entity(rt_se);
-
- inc_cpu_load(rq, p->se.load.weight);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
@@ -414,8 +412,6 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
if (rt_rq && rt_rq->rt_nr_running)
enqueue_rt_entity(rt_se);
}
-
- dec_cpu_load(rq, p->se.load.weight);
}
/*
diff --git a/kernel/signal.c b/kernel/signal.c
index 84917fe507f..6af1210092c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1623,7 +1623,6 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
/* Let the debugger run. */
__set_current_state(TASK_TRACED);
spin_unlock_irq(&current->sighand->siglock);
- try_to_freeze();
read_lock(&tasklist_lock);
if (!unlikely(killed) && may_ptrace_stop()) {
do_notify_parent_cldstop(current, CLD_TRAPPED);
@@ -1641,6 +1640,13 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
}
/*
+ * While in TASK_TRACED, we were considered "frozen enough".
+ * Now that we woke up, it's crucial if we're supposed to be
+ * frozen that we freeze now before running anything substantial.
+ */
+ try_to_freeze();
+
+ /*
* We are back. Now reacquire the siglock before touching
* last_siginfo, so that we are sure to have synchronized with
* any signal-sending on another CPU that wants to examine it.
@@ -1757,9 +1763,15 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
sigset_t *mask = &current->blocked;
int signr = 0;
+relock:
+ /*
+ * We'll jump back here after any time we were stopped in TASK_STOPPED.
+ * While in TASK_STOPPED, we were considered "frozen enough".
+ * Now that we woke up, it's crucial if we're supposed to be
+ * frozen that we freeze now before running anything substantial.
+ */
try_to_freeze();
-relock:
spin_lock_irq(&current->sighand->siglock);
for (;;) {
struct k_sigaction *ka;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8b7e9541179..b2a2d6889ba 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -311,24 +311,6 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
-#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP)
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "sched_min_bal_int_shares",
- .data = &sysctl_sched_min_bal_int_shares,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
- {
- .ctl_name = CTL_UNNUMBERED,
- .procname = "sched_max_bal_int_shares",
- .data = &sysctl_sched_max_bal_int_shares,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = &proc_dointvec,
- },
-#endif
#endif
{
.ctl_name = CTL_UNNUMBERED,
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 317f8e211cd..4232fd75dd2 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -211,6 +211,10 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
case IOAPIC_LOWEST_PRIORITY:
vcpu = kvm_get_lowest_prio_vcpu(ioapic->kvm, vector,
deliver_bitmask);
+#ifdef CONFIG_X86
+ if (irq == 0)
+ vcpu = ioapic->kvm->vcpus[0];
+#endif
if (vcpu != NULL)
ioapic_inj_irq(ioapic, vcpu, vector,
trig_mode, delivery_mode);
@@ -220,6 +224,10 @@ static void ioapic_deliver(struct kvm_ioapic *ioapic, int irq)
deliver_bitmask, vector, IOAPIC_LOWEST_PRIORITY);
break;
case IOAPIC_FIXED:
+#ifdef CONFIG_X86
+ if (irq == 0)
+ deliver_bitmask = 1;
+#endif
for (vcpu_id = 0; deliver_bitmask != 0; vcpu_id++) {
if (!(deliver_bitmask & (1 << vcpu_id)))
continue;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 32fbf800696..b2e12893e3f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -169,6 +169,7 @@ static struct kvm *kvm_create_vm(void)
kvm_io_bus_init(&kvm->pio_bus);
mutex_init(&kvm->lock);
kvm_io_bus_init(&kvm->mmio_bus);
+ init_rwsem(&kvm->slots_lock);
spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
spin_unlock(&kvm_lock);
@@ -339,9 +340,9 @@ int kvm_set_memory_region(struct kvm *kvm,
{
int r;
- down_write(&current->mm->mmap_sem);
+ down_write(&kvm->slots_lock);
r = __kvm_set_memory_region(kvm, mem, user_alloc);
- up_write(&current->mm->mmap_sem);
+ up_write(&kvm->slots_lock);
return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
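
Taken together, the kvm_host.h and kvm_main.c hunks introduce slots_lock as the lock protecting the memslot array, decoupling memslot updates from the user address space's mmap_sem. A hedged sketch of the resulting discipline (the reader-side lookup is illustrative):

    /* Writers, e.g. the KVM_SET_USER_MEMORY_REGION path: */
    down_write(&kvm->slots_lock);
    r = __kvm_set_memory_region(kvm, mem, user_alloc);
    up_write(&kvm->slots_lock);

    /* Readers walking kvm->memslots[] take it shared: */
    down_read(&kvm->slots_lock);
    slot = gfn_to_memslot(kvm, gfn);   /* stable against concurrent updates */
    up_read(&kvm->slots_lock);
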