79 files changed, 1281 insertions, 604 deletions
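
A note on the first code change below: the arch/i386/kernel/cpu/mtrr/main.c hunk replaces a bare smp_processor_id() test in mtrr_save_state() with a get_cpu()/put_cpu() pair, so the "am I CPU 0?" check cannot race with kernel preemption. A minimal sketch of that pattern (illustrative only, not the kernel's code; do_local_work() and ask_cpu0() are placeholder names):

	static void run_on_boot_cpu_or_delegate(void)
	{
		int cpu = get_cpu();		/* disables preemption, returns current CPU id */

		if (cpu == 0)
			do_local_work();	/* still guaranteed to run on CPU 0 here */
		else
			ask_cpu0();		/* e.g. smp_call_function_single(0, fn, ...) */

		put_cpu();			/* re-enables preemption */
	}
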
diff --git a/Documentation/powerpc/booting-without-of.txt b/Documentation/powerpc/booting-without-of.txt
index b49ce169a63..d42d98107d4 100644
--- a/Documentation/powerpc/booting-without-of.txt
+++ b/Documentation/powerpc/booting-without-of.txt
@@ -1,7 +1,6 @@
 Booting the Linux/ppc kernel without Open Firmware
 --------------------------------------------------
-
 (c) 2005 Benjamin Herrenschmidt <benh at kernel.crashing.org>,
     IBM Corp.
 (c) 2005 Becky Bruce <becky.bruce at freescale.com>,
@@ -9,6 +8,62 @@
 (c) 2006 MontaVista Software, Inc.
     Flash chip node definition
+Table of Contents
+=================
+
+  I - Introduction
+    1) Entry point for arch/powerpc
+    2) Board support
+
+  II - The DT block format
+    1) Header
+    2) Device tree generalities
+    3) Device tree "structure" block
+    4) Device tree "strings" block
+
+  III - Required content of the device tree
+    1) Note about cells and address representation
+    2) Note about "compatible" properties
+    3) Note about "name" properties
+    4) Note about node and property names and character set
+    5) Required nodes and properties
+      a) The root node
+      b) The /cpus node
+      c) The /cpus/* nodes
+      d) the /memory node(s)
+      e) The /chosen node
+      f) the /soc<SOCname> node
+
+  IV - "dtc", the device tree compiler
+
+  V - Recommendations for a bootloader
+
+  VI - System-on-a-chip devices and nodes
+    1) Defining child nodes of an SOC
+    2) Representing devices without a current OF specification
+      a) MDIO IO device
+      c) PHY nodes
+      b) Gianfar-compatible ethernet nodes
+      d) Interrupt controllers
+      e) I2C
+      f) Freescale SOC USB controllers
+      g) Freescale SOC SEC Security Engines
+      h) Board Control and Status (BCSR)
+      i) Freescale QUICC Engine module (QE)
+      g) Flash chip nodes
+
+  VII - Specifying interrupt information for devices
+    1) interrupts property
+    2) interrupt-parent property
+    3) OpenPIC Interrupt Controllers
+    4) ISA Interrupt Controllers
+
+  Appendix A - Sample SOC node for MPC8540
+
+
+Revision Information
+====================
+
 May 18, 2005: Rev 0.1 - Initial draft, no chapter III yet.
 
 May 19, 2005: Rev 0.2 - Add chapter III and bits & pieces here or
@@ -1687,7 +1742,7 @@ platforms are moved over to use the flattened-device-tree model.
 	};
 };
 
-    g) Flash chip nodes
+    j) Flash chip nodes
 
     Flash chips (Memory Technology Devices) are often used for solid state
     file systems on embedded devices.
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 22
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Jeff Thinks I Should Change This, But To What?
# *DOCUMENTATION* diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c index 1cf466df330..7202b98aac4 100644 --- a/arch/i386/kernel/cpu/mtrr/main.c +++ b/arch/i386/kernel/cpu/mtrr/main.c @@ -734,10 +734,13 @@ void mtrr_ap_init(void) */ void mtrr_save_state(void) { - if (smp_processor_id() == 0) + int cpu = get_cpu(); + + if (cpu == 0) mtrr_save_fixed_ranges(NULL); else smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1); + put_cpu(); } static int __init mtrr_init_finialize(void) diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S index 5a4215c4b01..f1c4dfc635b 100644 --- a/arch/powerpc/boot/crt0.S +++ b/arch/powerpc/boot/crt0.S @@ -13,6 +13,7 @@ .text /* a procedure descriptor used when booting this as a COFF file */ + .globl _zimage_start_opd _zimage_start_opd: .long _zimage_start, 0, 0, 0 diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index d501c23e515..d454f61c9c7 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c @@ -433,7 +433,7 @@ static int __devinit of_pci_phb_probe(struct of_device *dev, * Note also that we don't do ISA, this will also be fixed with a * more massive rework. */ - pci_setup_phb_io(phb, 0); + pci_setup_phb_io(phb, pci_io_base == 0); /* Init pci_dn data structures */ pci_devs_phb_init_dynamic(phb); diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c index f9ac3fe3be9..ac445998d83 100644 --- a/arch/powerpc/platforms/cell/cbe_cpufreq.c +++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c @@ -67,6 +67,7 @@ static u64 MIC_Slow_Next_Timer_table[] = { 0x00003FC000000000ull, }; +static unsigned int pmi_frequency_limit = 0; /* * hardware specific functions */ @@ -164,7 +165,6 @@ static int set_pmode(int cpu, unsigned int slow_mode) { static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg) { - struct cpufreq_policy policy; u8 cpu; u8 cbe_pmode_new; @@ -173,15 +173,27 @@ static void cbe_cpufreq_handle_pmi(struct of_device *dev, pmi_message_t pmi_msg) cpu = cbe_node_to_cpu(pmi_msg.data1); cbe_pmode_new = pmi_msg.data2; - cpufreq_get_policy(&policy, cpu); + pmi_frequency_limit = cbe_freqs[cbe_pmode_new].frequency; - policy.max = min(policy.max, cbe_freqs[cbe_pmode_new].frequency); - policy.min = min(policy.min, policy.max); + pr_debug("cbe_handle_pmi: max freq=%d\n", pmi_frequency_limit); +} + +static int pmi_notifier(struct notifier_block *nb, + unsigned long event, void *data) +{ + struct cpufreq_policy *policy = data; - pr_debug("cbe_handle_pmi: new policy.min=%d policy.max=%d\n", policy.min, policy.max); - cpufreq_set_policy(&policy); + if (event != CPUFREQ_INCOMPATIBLE) + return 0; + + cpufreq_verify_within_limits(policy, 0, pmi_frequency_limit); + return 0; } +static struct notifier_block pmi_notifier_block = { + .notifier_call = pmi_notifier, +}; + static struct pmi_handler cbe_pmi_handler = { .type = PMI_TYPE_FREQ_CHANGE, .handle_pmi_message = cbe_cpufreq_handle_pmi, @@ -238,12 +250,21 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy) cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu); + if (pmi_dev) { + /* frequency might get limited later, initialize limit with max_freq */ + pmi_frequency_limit = max_freq; + cpufreq_register_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); + } + /* this ensures that policy->cpuinfo_min and policy->cpuinfo_max are set correctly */ return cpufreq_frequency_table_cpuinfo(policy, cbe_freqs); } static int 
cbe_cpufreq_cpu_exit(struct cpufreq_policy *policy) { + if (pmi_dev) + cpufreq_unregister_notifier(&pmi_notifier_block, CPUFREQ_POLICY_NOTIFIER); + cpufreq_frequency_table_put_attr(policy->cpu); return 0; } diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c index 8654749e317..7c51cb54bca 100644 --- a/arch/powerpc/platforms/cell/spufs/context.c +++ b/arch/powerpc/platforms/cell/spufs/context.c @@ -39,7 +39,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang) if (spu_init_csa(&ctx->csa)) goto out_free; spin_lock_init(&ctx->mmio_lock); - spin_lock_init(&ctx->mapping_lock); + mutex_init(&ctx->mapping_lock); kref_init(&ctx->kref); mutex_init(&ctx->state_mutex); mutex_init(&ctx->run_mutex); @@ -103,6 +103,7 @@ void spu_forget(struct spu_context *ctx) void spu_unmap_mappings(struct spu_context *ctx) { + mutex_lock(&ctx->mapping_lock); if (ctx->local_store) unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1); if (ctx->mfc) @@ -117,6 +118,7 @@ void spu_unmap_mappings(struct spu_context *ctx) unmap_mapping_range(ctx->mss, 0, 0x1000, 1); if (ctx->psmap) unmap_mapping_range(ctx->psmap, 0, 0x20000, 1); + mutex_unlock(&ctx->mapping_lock); } /** diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 45614c73c78..b1e7e2f8a2e 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c @@ -45,11 +45,11 @@ spufs_mem_open(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->local_store = inode->i_mapping; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return 0; } @@ -59,10 +59,10 @@ spufs_mem_release(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->local_store = NULL; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return 0; } @@ -217,6 +217,7 @@ unsigned long spufs_get_unmapped_area(struct file *file, unsigned long addr, static const struct file_operations spufs_mem_fops = { .open = spufs_mem_open, + .release = spufs_mem_release, .read = spufs_mem_read, .write = spufs_mem_write, .llseek = generic_file_llseek, @@ -309,11 +310,11 @@ static int spufs_cntl_open(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->cntl = inode->i_mapping; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return simple_attr_open(inode, file, spufs_cntl_get, spufs_cntl_set, "0x%08lx"); } @@ -326,10 +327,10 @@ spufs_cntl_release(struct inode *inode, struct file *file) simple_attr_close(inode, file); - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->cntl = NULL; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return 0; } @@ -812,11 +813,11 @@ static int spufs_signal1_open(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->signal1 = 
inode->i_mapping; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } @@ -826,10 +827,10 @@ spufs_signal1_release(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->signal1 = NULL; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return 0; } @@ -936,11 +937,11 @@ static int spufs_signal2_open(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->signal2 = inode->i_mapping; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } @@ -950,10 +951,10 @@ spufs_signal2_release(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->signal2 = NULL; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return 0; } @@ -1154,10 +1155,10 @@ static int spufs_mss_open(struct inode *inode, struct file *file) file->private_data = i->i_ctx; - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); if (!i->i_openers++) ctx->mss = inode->i_mapping; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } @@ -1167,10 +1168,10 @@ spufs_mss_release(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->mss = NULL; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return 0; } @@ -1211,11 +1212,11 @@ static int spufs_psmap_open(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); file->private_data = i->i_ctx; if (!i->i_openers++) ctx->psmap = inode->i_mapping; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } @@ -1225,10 +1226,10 @@ spufs_psmap_release(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->psmap = NULL; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return 0; } @@ -1281,11 +1282,11 @@ static int spufs_mfc_open(struct inode *inode, struct file *file) if (atomic_read(&inode->i_count) != 1) return -EBUSY; - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); file->private_data = ctx; if (!i->i_openers++) ctx->mfc = inode->i_mapping; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return nonseekable_open(inode, file); } @@ -1295,10 +1296,10 @@ spufs_mfc_release(struct inode *inode, struct file *file) struct spufs_inode_info *i = SPUFS_I(inode); struct spu_context *ctx = i->i_ctx; - spin_lock(&ctx->mapping_lock); + mutex_lock(&ctx->mapping_lock); if (!--i->i_openers) ctx->mfc = NULL; - spin_unlock(&ctx->mapping_lock); + mutex_unlock(&ctx->mapping_lock); return 0; } diff --git 
a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 7150730e2ff..9807206e021 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -177,7 +177,7 @@ static int spufs_rmdir(struct inode *parent, struct dentry *dir) static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files, int mode, struct spu_context *ctx) { - struct dentry *dentry; + struct dentry *dentry, *tmp; int ret; while (files->name && files->name[0]) { @@ -193,7 +193,20 @@ static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files, } return 0; out: - spufs_prune_dir(dir); + /* + * remove all children from dir. dir->inode is not set so don't + * just simply use spufs_prune_dir() and panic afterwards :) + * dput() looks like it will do the right thing: + * - dec parent's ref counter + * - remove child from parent's child list + * - free child's inode if possible + * - free child + */ + list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { + dput(dentry); + } + + shrink_dcache_parent(dir); return ret; } @@ -274,6 +287,7 @@ spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags, goto out; out_free_ctx: + spu_forget(ctx); put_spu_context(ctx); out_iput: iput(inode); @@ -349,37 +363,6 @@ out: return ret; } -static int spufs_rmgang(struct inode *root, struct dentry *dir) -{ - /* FIXME: this fails if the dir is not empty, - which causes a leak of gangs. */ - return simple_rmdir(root, dir); -} - -static int spufs_gang_close(struct inode *inode, struct file *file) -{ - struct inode *parent; - struct dentry *dir; - int ret; - - dir = file->f_path.dentry; - parent = dir->d_parent->d_inode; - - ret = spufs_rmgang(parent, dir); - WARN_ON(ret); - - return dcache_dir_close(inode, file); -} - -const struct file_operations spufs_gang_fops = { - .open = dcache_dir_open, - .release = spufs_gang_close, - .llseek = dcache_dir_lseek, - .read = generic_read_dir, - .readdir = dcache_readdir, - .fsync = simple_sync_file, -}; - static int spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode) { @@ -407,7 +390,6 @@ spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode) inode->i_fop = &simple_dir_operations; d_instantiate(dentry, inode); - dget(dentry); dir->i_nlink++; dentry->d_inode->i_nlink++; return ret; @@ -437,7 +419,7 @@ static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt) goto out; } - filp->f_op = &spufs_gang_fops; + filp->f_op = &simple_dir_operations; fd_install(ret, filp); out: return ret; @@ -458,8 +440,10 @@ static int spufs_create_gang(struct inode *inode, * in error path of *_open(). 
*/ ret = spufs_gang_open(dget(dentry), mntget(mnt)); - if (ret < 0) - WARN_ON(spufs_rmgang(inode, dentry)); + if (ret < 0) { + int err = simple_rmdir(inode, dentry); + WARN_ON(err); + } out: mutex_unlock(&inode->i_mutex); @@ -600,6 +584,10 @@ spufs_create_root(struct super_block *sb, void *data) struct inode *inode; int ret; + ret = -ENODEV; + if (!spu_management_ops) + goto out; + ret = -ENOMEM; inode = spufs_new_inode(sb, S_IFDIR | 0775); if (!inode) diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index b6ecb30e7d5..3b831e07f1e 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -93,43 +93,6 @@ void spu_stop_tick(struct spu_context *ctx) } } -void spu_sched_tick(struct work_struct *work) -{ - struct spu_context *ctx = - container_of(work, struct spu_context, sched_work.work); - struct spu *spu; - int preempted = 0; - - /* - * If this context is being stopped avoid rescheduling from the - * scheduler tick because we would block on the state_mutex. - * The caller will yield the spu later on anyway. - */ - if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags)) - return; - - mutex_lock(&ctx->state_mutex); - spu = ctx->spu; - if (spu) { - int best = sched_find_first_bit(spu_prio->bitmap); - if (best <= ctx->prio) { - spu_deactivate(ctx); - preempted = 1; - } - } - mutex_unlock(&ctx->state_mutex); - - if (preempted) { - /* - * We need to break out of the wait loop in spu_run manually - * to ensure this context gets put on the runqueue again - * ASAP. - */ - wake_up(&ctx->stop_wq); - } else - spu_start_tick(ctx); -} - /** * spu_add_to_active_list - add spu to active list * @spu: spu to add to the active list @@ -273,34 +236,6 @@ static void spu_prio_wait(struct spu_context *ctx) remove_wait_queue(&ctx->stop_wq, &wait); } -/** - * spu_reschedule - try to find a runnable context for a spu - * @spu: spu available - * - * This function is called whenever a spu becomes idle. It looks for the - * most suitable runnable spu context and schedules it for execution. - */ -static void spu_reschedule(struct spu *spu) -{ - int best; - - spu_free(spu); - - spin_lock(&spu_prio->runq_lock); - best = sched_find_first_bit(spu_prio->bitmap); - if (best < MAX_PRIO) { - struct list_head *rq = &spu_prio->runq[best]; - struct spu_context *ctx; - - BUG_ON(list_empty(rq)); - - ctx = list_entry(rq->next, struct spu_context, rq); - __spu_del_from_rq(ctx); - wake_up(&ctx->stop_wq); - } - spin_unlock(&spu_prio->runq_lock); -} - static struct spu *spu_get_idle(struct spu_context *ctx) { struct spu *spu = NULL; @@ -429,6 +364,51 @@ int spu_activate(struct spu_context *ctx, unsigned long flags) } /** + * grab_runnable_context - try to find a runnable context + * + * Remove the highest priority context on the runqueue and return it + * to the caller. Returns %NULL if no runnable context was found. 
+ */ +static struct spu_context *grab_runnable_context(int prio) +{ + struct spu_context *ctx = NULL; + int best; + + spin_lock(&spu_prio->runq_lock); + best = sched_find_first_bit(spu_prio->bitmap); + if (best < prio) { + struct list_head *rq = &spu_prio->runq[best]; + + BUG_ON(list_empty(rq)); + + ctx = list_entry(rq->next, struct spu_context, rq); + __spu_del_from_rq(ctx); + } + spin_unlock(&spu_prio->runq_lock); + + return ctx; +} + +static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio) +{ + struct spu *spu = ctx->spu; + struct spu_context *new = NULL; + + if (spu) { + new = grab_runnable_context(max_prio); + if (new || force) { + spu_unbind_context(spu, ctx); + spu_free(spu); + if (new) + wake_up(&new->stop_wq); + } + + } + + return new != NULL; +} + +/** * spu_deactivate - unbind a context from it's physical spu * @ctx: spu context to unbind * @@ -437,12 +417,7 @@ int spu_activate(struct spu_context *ctx, unsigned long flags) */ void spu_deactivate(struct spu_context *ctx) { - struct spu *spu = ctx->spu; - - if (spu) { - spu_unbind_context(spu, ctx); - spu_reschedule(spu); - } + __spu_deactivate(ctx, 1, MAX_PRIO); } /** @@ -455,21 +430,43 @@ void spu_deactivate(struct spu_context *ctx) */ void spu_yield(struct spu_context *ctx) { - struct spu *spu; - - if (mutex_trylock(&ctx->state_mutex)) { - if ((spu = ctx->spu) != NULL) { - int best = sched_find_first_bit(spu_prio->bitmap); - if (best < MAX_PRIO) { - pr_debug("%s: yielding SPU %d NODE %d\n", - __FUNCTION__, spu->number, spu->node); - spu_deactivate(ctx); - } - } + if (!(ctx->flags & SPU_CREATE_NOSCHED)) { + mutex_lock(&ctx->state_mutex); + __spu_deactivate(ctx, 0, MAX_PRIO); mutex_unlock(&ctx->state_mutex); } } +void spu_sched_tick(struct work_struct *work) +{ + struct spu_context *ctx = + container_of(work, struct spu_context, sched_work.work); + int preempted; + + /* + * If this context is being stopped avoid rescheduling from the + * scheduler tick because we would block on the state_mutex. + * The caller will yield the spu later on anyway. + */ + if (test_bit(SPU_SCHED_EXITING, &ctx->sched_flags)) + return; + + mutex_lock(&ctx->state_mutex); + preempted = __spu_deactivate(ctx, 0, ctx->prio + 1); + mutex_unlock(&ctx->state_mutex); + + if (preempted) { + /* + * We need to break out of the wait loop in spu_run manually + * to ensure this context gets put on the runqueue again + * ASAP. + */ + wake_up(&ctx->stop_wq); + } else { + spu_start_tick(ctx); + } +} + int __init spu_sched_init(void) { int i; diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 0a947fd7de5..47617e8014a 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -55,7 +55,7 @@ struct spu_context { struct address_space *signal2; /* 'signal2' area mappings. */ struct address_space *mss; /* 'mss' area mappings. */ struct address_space *psmap; /* 'psmap' area mappings. 
*/ - spinlock_t mapping_lock; + struct mutex mapping_lock; u64 object_id; /* user space pointer for oprofile */ enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state; diff --git a/arch/powerpc/platforms/celleb/Makefile b/arch/powerpc/platforms/celleb/Makefile index f4f82520dc4..5240046d867 100644 --- a/arch/powerpc/platforms/celleb/Makefile +++ b/arch/powerpc/platforms/celleb/Makefile @@ -4,5 +4,5 @@ obj-y += interrupt.o iommu.o setup.o \ obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_PPC_UDBG_BEAT) += udbg_beat.o -obj-$(CONFIG_HAS_TXX9_SERIAL) += scc_sio.o +obj-$(CONFIG_SERIAL_TXX9) += scc_sio.o obj-$(CONFIG_SPU_BASE) += spu_priv1.o diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 95fa6a7d15e..f33b21b9f5d 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c @@ -31,8 +31,6 @@ #define IOBMAP_PAGE_SIZE (1 << IOBMAP_PAGE_SHIFT) #define IOBMAP_PAGE_MASK (IOBMAP_PAGE_SIZE - 1) -#define IOBMAP_PAGE_FACTOR (PAGE_SHIFT - IOBMAP_PAGE_SHIFT) - #define IOB_BASE 0xe0000000 #define IOB_SIZE 0x3000 /* Configuration registers */ @@ -97,9 +95,6 @@ static void iobmap_build(struct iommu_table *tbl, long index, bus_addr = (tbl->it_offset + index) << PAGE_SHIFT; - npages <<= IOBMAP_PAGE_FACTOR; - index <<= IOBMAP_PAGE_FACTOR; - ip = ((u32 *)tbl->it_base) + index; while (npages--) { @@ -125,9 +120,6 @@ static void iobmap_free(struct iommu_table *tbl, long index, bus_addr = (tbl->it_offset + index) << PAGE_SHIFT; - npages <<= IOBMAP_PAGE_FACTOR; - index <<= IOBMAP_PAGE_FACTOR; - ip = ((u32 *)tbl->it_base) + index; while (npages--) { diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index bd00f89eed1..89a1b469b93 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig @@ -396,6 +396,15 @@ config SCHED_SMT when dealing with UltraSPARC cpus at a cost of slightly increased overhead in some places. If unsure say N here. +config SCHED_MC + bool "Multi-core scheduler support" + depends on SMP + default y + help + Multi-core scheduler support improves the CPU scheduler's decision + making when dealing with multi-core CPU chips at a cost of slightly + increased overhead in some places. If unsure say N here. + source "kernel/Kconfig.preempt" config CMDLINE_BOOL diff --git a/arch/sparc64/kernel/Makefile b/arch/sparc64/kernel/Makefile index d8d19093d12..f964bf28d21 100644 --- a/arch/sparc64/kernel/Makefile +++ b/arch/sparc64/kernel/Makefile @@ -1,4 +1,4 @@ -# $Id: Makefile,v 1.70 2002/02/09 19:49:30 davem Exp $ +# # Makefile for the linux kernel. 
# @@ -8,7 +8,7 @@ EXTRA_CFLAGS := -Werror extra-y := head.o init_task.o vmlinux.lds obj-y := process.o setup.o cpu.o idprom.o \ - traps.o auxio.o una_asm.o \ + traps.o auxio.o una_asm.o sysfs.o \ irq.o ptrace.o time.o sys_sparc.o signal.o \ unaligned.o central.o pci.o starfire.o semaphore.o \ power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o \ diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S index ed712e0b337..7d1a11822a1 100644 --- a/arch/sparc64/kernel/entry.S +++ b/arch/sparc64/kernel/entry.S @@ -2514,9 +2514,9 @@ sun4v_ncs_request: nop .size sun4v_ncs_request, .-sun4v_ncs_request - .globl sun4v_scv_send - .type sun4v_scv_send,#function -sun4v_scv_send: + .globl sun4v_svc_send + .type sun4v_svc_send,#function +sun4v_svc_send: save %sp, -192, %sp mov %i0, %o0 mov %i1, %o1 @@ -2526,11 +2526,11 @@ sun4v_scv_send: stx %o1, [%i3] ret restore - .size sun4v_scv_send, .-sun4v_scv_send + .size sun4v_svc_send, .-sun4v_svc_send - .globl sun4v_scv_recv - .type sun4v_scv_recv,#function -sun4v_scv_recv: + .globl sun4v_svc_recv + .type sun4v_svc_recv,#function +sun4v_svc_recv: save %sp, -192, %sp mov %i0, %o0 mov %i1, %o1 @@ -2540,33 +2540,55 @@ sun4v_scv_recv: stx %o1, [%i3] ret restore - .size sun4v_scv_recv, .-sun4v_scv_recv + .size sun4v_svc_recv, .-sun4v_svc_recv - .globl sun4v_scv_getstatus - .type sun4v_scv_getstatus,#function -sun4v_scv_getstatus: + .globl sun4v_svc_getstatus + .type sun4v_svc_getstatus,#function +sun4v_svc_getstatus: mov HV_FAST_SVC_GETSTATUS, %o5 mov %o1, %o4 ta HV_FAST_TRAP stx %o1, [%o4] retl nop - .size sun4v_scv_getstatus, .-sun4v_scv_getstatus + .size sun4v_svc_getstatus, .-sun4v_svc_getstatus - .globl sun4v_scv_setstatus - .type sun4v_scv_setstatus,#function -sun4v_scv_setstatus: + .globl sun4v_svc_setstatus + .type sun4v_svc_setstatus,#function +sun4v_svc_setstatus: mov HV_FAST_SVC_SETSTATUS, %o5 ta HV_FAST_TRAP retl nop - .size sun4v_scv_setstatus, .-sun4v_scv_setstatus + .size sun4v_svc_setstatus, .-sun4v_svc_setstatus - .globl sun4v_scv_clrstatus - .type sun4v_scv_clrstatus,#function -sun4v_scv_clrstatus: + .globl sun4v_svc_clrstatus + .type sun4v_svc_clrstatus,#function +sun4v_svc_clrstatus: mov HV_FAST_SVC_CLRSTATUS, %o5 ta HV_FAST_TRAP retl nop - .size sun4v_scv_clrstatus, .-sun4v_scv_clrstatus + .size sun4v_svc_clrstatus, .-sun4v_svc_clrstatus + + .globl sun4v_mmustat_conf + .type sun4v_mmustat_conf,#function +sun4v_mmustat_conf: + mov %o1, %o4 + mov HV_FAST_MMUSTAT_CONF, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop + .size sun4v_mmustat_conf, .-sun4v_mmustat_conf + + .globl sun4v_mmustat_info + .type sun4v_mmustat_info,#function +sun4v_mmustat_info: + mov %o0, %o4 + mov HV_FAST_MMUSTAT_INFO, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop + .size sun4v_mmustat_info, .-sun4v_mmustat_info diff --git a/arch/sparc64/kernel/mdesc.c b/arch/sparc64/kernel/mdesc.c index 9246c2cf957..f0e16045fb1 100644 --- a/arch/sparc64/kernel/mdesc.c +++ b/arch/sparc64/kernel/mdesc.c @@ -473,6 +473,53 @@ static void __init set_core_ids(void) } } +static void __init mark_proc_ids(struct mdesc_node *mp, int proc_id) +{ + int i; + + for (i = 0; i < mp->num_arcs; i++) { + struct mdesc_node *t = mp->arcs[i].arc; + const u64 *id; + + if (strcmp(mp->arcs[i].name, "back")) + continue; + + if (strcmp(t->name, "cpu")) + continue; + + id = md_get_property(t, "id", NULL); + if (*id < NR_CPUS) + cpu_data(*id).proc_id = proc_id; + } +} + +static void __init __set_proc_ids(const char *exec_unit_name) +{ + struct mdesc_node *mp; + int idx; + + idx = 0; + 
md_for_each_node_by_name(mp, exec_unit_name) { + const char *type; + int len; + + type = md_get_property(mp, "type", &len); + if (!find_in_proplist(type, "int", len) && + !find_in_proplist(type, "integer", len)) + continue; + + mark_proc_ids(mp, idx); + + idx++; + } +} + +static void __init set_proc_ids(void) +{ + __set_proc_ids("exec_unit"); + __set_proc_ids("exec-unit"); +} + static void __init get_one_mondo_bits(const u64 *p, unsigned int *mask, unsigned char def) { u64 val; @@ -574,9 +621,15 @@ static void __init mdesc_fill_in_cpu_data(void) #endif c->core_id = 0; + c->proc_id = -1; } +#ifdef CONFIG_SMP + sparc64_multi_core = 1; +#endif + set_core_ids(); + set_proc_ids(); smp_fill_in_sib_core_maps(); } diff --git a/arch/sparc64/kernel/prom.c b/arch/sparc64/kernel/prom.c index dad4b3ba705..6f4a5284b0e 100644 --- a/arch/sparc64/kernel/prom.c +++ b/arch/sparc64/kernel/prom.c @@ -1781,6 +1781,10 @@ static void __init of_fill_in_cpu_data(void) } cpu_data(cpuid).core_id = portid + 1; + cpu_data(cpuid).proc_id = portid; +#ifdef CONFIG_SMP + sparc64_multi_core = 1; +#endif } else { cpu_data(cpuid).dcache_size = of_getintprop_default(dp, "dcache-size", 16 * 1024); @@ -1799,6 +1803,7 @@ static void __init of_fill_in_cpu_data(void) of_getintprop_default(dp, "ecache-line-size", 64); cpu_data(cpuid).core_id = 0; + cpu_data(cpuid).proc_id = -1; } #ifdef CONFIG_SMP diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c index de9b4c13f1c..7490cc670a5 100644 --- a/arch/sparc64/kernel/setup.c +++ b/arch/sparc64/kernel/setup.c @@ -513,22 +513,3 @@ void sun_do_break(void) int serial_console = -1; int stop_a_enabled = 1; - -static int __init topology_init(void) -{ - int i, err; - - err = -ENOMEM; - - for_each_possible_cpu(i) { - struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL); - if (p) { - register_cpu(p, i); - err = 0; - } - } - - return err; -} - -subsys_initcall(topology_init); diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index c550bba3490..4dcd7d0b60f 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c @@ -44,6 +44,8 @@ extern void calibrate_delay(void); +int sparc64_multi_core __read_mostly; + /* Please don't make this stuff initdata!!! --DaveM */ unsigned char boot_cpu_id; @@ -51,6 +53,8 @@ cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE; cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE; cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; +cpumask_t cpu_core_map[NR_CPUS] __read_mostly = + { [0 ... NR_CPUS-1] = CPU_MASK_NONE }; static cpumask_t smp_commenced_mask; static cpumask_t cpu_callout_map; @@ -1217,13 +1221,28 @@ void __devinit smp_fill_in_sib_core_maps(void) unsigned int j; if (cpu_data(i).core_id == 0) { - cpu_set(i, cpu_sibling_map[i]); + cpu_set(i, cpu_core_map[i]); continue; } for_each_possible_cpu(j) { if (cpu_data(i).core_id == cpu_data(j).core_id) + cpu_set(j, cpu_core_map[i]); + } + } + + for_each_possible_cpu(i) { + unsigned int j; + + if (cpu_data(i).proc_id == -1) { + cpu_set(i, cpu_sibling_map[i]); + continue; + } + + for_each_possible_cpu(j) { + if (cpu_data(i).proc_id == + cpu_data(j).proc_id) cpu_set(j, cpu_sibling_map[i]); } } diff --git a/arch/sparc64/kernel/sysfs.c b/arch/sparc64/kernel/sysfs.c new file mode 100644 index 00000000000..cdb1477af89 --- /dev/null +++ b/arch/sparc64/kernel/sysfs.c @@ -0,0 +1,297 @@ +/* sysfs.c: Toplogy sysfs support code for sparc64. + * + * Copyright (C) 2007 David S. 
Miller <davem@davemloft.net> + */ +#include <linux/sysdev.h> +#include <linux/cpu.h> +#include <linux/smp.h> +#include <linux/percpu.h> +#include <linux/init.h> + +#include <asm/hypervisor.h> +#include <asm/spitfire.h> + +static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64))); + +#define SHOW_MMUSTAT_ULONG(NAME) \ +static ssize_t show_##NAME(struct sys_device *dev, char *buf) \ +{ \ + struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \ + return sprintf(buf, "%lu\n", p->NAME); \ +} \ +static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL) + +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_64k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_64k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_4mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_4mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_256mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_256mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_8k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_8k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_64k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_64k_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_4mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_4mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctxnon0_256mb_tte); +SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctxnon0_256mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_8k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_8k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_64k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_64k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_4mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_4mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctx0_256mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctx0_256mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_8k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_8k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_64k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_64k_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_4mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_4mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte); +SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte); + +static struct attribute *mmu_stat_attrs[] = { + &attr_immu_tsb_hits_ctx0_8k_tte.attr, + &attr_immu_tsb_ticks_ctx0_8k_tte.attr, + &attr_immu_tsb_hits_ctx0_64k_tte.attr, + &attr_immu_tsb_ticks_ctx0_64k_tte.attr, + &attr_immu_tsb_hits_ctx0_4mb_tte.attr, + &attr_immu_tsb_ticks_ctx0_4mb_tte.attr, + &attr_immu_tsb_hits_ctx0_256mb_tte.attr, + &attr_immu_tsb_ticks_ctx0_256mb_tte.attr, + &attr_immu_tsb_hits_ctxnon0_8k_tte.attr, + &attr_immu_tsb_ticks_ctxnon0_8k_tte.attr, + &attr_immu_tsb_hits_ctxnon0_64k_tte.attr, + &attr_immu_tsb_ticks_ctxnon0_64k_tte.attr, + &attr_immu_tsb_hits_ctxnon0_4mb_tte.attr, + &attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr, + &attr_immu_tsb_hits_ctxnon0_256mb_tte.attr, + &attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr, + &attr_dmmu_tsb_hits_ctx0_8k_tte.attr, + &attr_dmmu_tsb_ticks_ctx0_8k_tte.attr, + &attr_dmmu_tsb_hits_ctx0_64k_tte.attr, + &attr_dmmu_tsb_ticks_ctx0_64k_tte.attr, + &attr_dmmu_tsb_hits_ctx0_4mb_tte.attr, + &attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr, + &attr_dmmu_tsb_hits_ctx0_256mb_tte.attr, + &attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr, + &attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr, + &attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr, + &attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr, + &attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr, + &attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr, + 
&attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr, + &attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr, + &attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr, + NULL, +}; + +static struct attribute_group mmu_stat_group = { + .attrs = mmu_stat_attrs, + .name = "mmu_stats", +}; + +/* XXX convert to rusty's on_one_cpu */ +static unsigned long run_on_cpu(unsigned long cpu, + unsigned long (*func)(unsigned long), + unsigned long arg) +{ + cpumask_t old_affinity = current->cpus_allowed; + unsigned long ret; + + /* should return -EINVAL to userspace */ + if (set_cpus_allowed(current, cpumask_of_cpu(cpu))) + return 0; + + ret = func(arg); + + set_cpus_allowed(current, old_affinity); + + return ret; +} + +static unsigned long read_mmustat_enable(unsigned long junk) +{ + unsigned long ra = 0; + + sun4v_mmustat_info(&ra); + + return ra != 0; +} + +static unsigned long write_mmustat_enable(unsigned long val) +{ + unsigned long ra, orig_ra; + + if (val) + ra = __pa(&per_cpu(mmu_stats, smp_processor_id())); + else + ra = 0UL; + + return sun4v_mmustat_conf(ra, &orig_ra); +} + +static ssize_t show_mmustat_enable(struct sys_device *s, char *buf) +{ + unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0); + return sprintf(buf, "%lx\n", val); +} + +static ssize_t store_mmustat_enable(struct sys_device *s, const char *buf, size_t count) +{ + unsigned long val, err; + int ret = sscanf(buf, "%ld", &val); + + if (ret != 1) + return -EINVAL; + + err = run_on_cpu(s->id, write_mmustat_enable, val); + if (err) + return -EIO; + + return count; +} + +static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable); + +static int mmu_stats_supported; + +static int register_mmu_stats(struct sys_device *s) +{ + if (!mmu_stats_supported) + return 0; + sysdev_create_file(s, &attr_mmustat_enable); + return sysfs_create_group(&s->kobj, &mmu_stat_group); +} + +#ifdef CONFIG_HOTPLUG_CPU +static void unregister_mmu_stats(struct sys_device *s) +{ + if (!mmu_stats_supported) + return; + sysfs_remove_group(&s->kobj, &mmu_stat_group); + sysdev_remove_file(s, &attr_mmustat_enable); +} +#endif + +#define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \ +static ssize_t show_##NAME(struct sys_device *dev, char *buf) \ +{ \ + cpuinfo_sparc *c = &cpu_data(dev->id); \ + return sprintf(buf, "%lu\n", c->MEMBER); \ +} + +#define SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \ +static ssize_t show_##NAME(struct sys_device *dev, char *buf) \ +{ \ + cpuinfo_sparc *c = &cpu_data(dev->id); \ + return sprintf(buf, "%u\n", c->MEMBER); \ +} + +SHOW_CPUDATA_ULONG_NAME(clock_tick, clock_tick); +SHOW_CPUDATA_ULONG_NAME(udelay_val, udelay_val); +SHOW_CPUDATA_UINT_NAME(l1_dcache_size, dcache_size); +SHOW_CPUDATA_UINT_NAME(l1_dcache_line_size, dcache_line_size); +SHOW_CPUDATA_UINT_NAME(l1_icache_size, icache_size); +SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size); +SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size); +SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size); + +static struct sysdev_attribute cpu_core_attrs[] = { + _SYSDEV_ATTR(clock_tick, 0444, show_clock_tick, NULL), + _SYSDEV_ATTR(udelay_val, 0444, show_udelay_val, NULL), + _SYSDEV_ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL), + _SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL), + _SYSDEV_ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL), + _SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL), + _SYSDEV_ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL), + _SYSDEV_ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL), +}; 
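
/*
 * Illustration (not taken from the patch): once register_cpu_online() below
 * has run, these attributes should surface under the standard per-CPU sysdev
 * directories, e.g.
 *
 *	/sys/devices/system/cpu/cpu0/clock_tick
 *	/sys/devices/system/cpu/cpu0/l2_cache_size
 *	/sys/devices/system/cpu/cpu0/mmu_stats/dmmu_tsb_hits_ctx0_8k_tte
 *
 * and per-CPU MMU statistics collection would be toggled with something like
 * "echo 1 > /sys/devices/system/cpu/cpu0/mmustat_enable".  The paths are
 * assumed from the generic cpu sysdev layout; they are not spelled out in
 * the patch itself.
 */
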
+ +static DEFINE_PER_CPU(struct cpu, cpu_devices); + +static void register_cpu_online(unsigned int cpu) +{ + struct cpu *c = &per_cpu(cpu_devices, cpu); + struct sys_device *s = &c->sysdev; + int i; + + for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++) + sysdev_create_file(s, &cpu_core_attrs[i]); + + register_mmu_stats(s); +} + +#ifdef CONFIG_HOTPLUG_CPU +static void unregister_cpu_online(unsigned int cpu) +{ + struct cpu *c = &per_cpu(cpu_devices, cpu); + struct sys_device *s = &c->sysdev; + int i; + + unregister_mmu_stats(s); + for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++) + sysdev_remove_file(s, &cpu_core_attrs[i]); +} +#endif + +static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + unsigned int cpu = (unsigned int)(long)hcpu; + + switch (action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + register_cpu_online(cpu); + break; +#ifdef CONFIG_HOTPLUG_CPU + case CPU_DEAD: + case CPU_DEAD_FROZEN: + unregister_cpu_online(cpu); + break; +#endif + } + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata sysfs_cpu_nb = { + .notifier_call = sysfs_cpu_notify, +}; + +static void __init check_mmu_stats(void) +{ + unsigned long dummy1, err; + + if (tlb_type != hypervisor) + return; + + err = sun4v_mmustat_info(&dummy1); + if (!err) + mmu_stats_supported = 1; +} + +static int __init topology_init(void) +{ + int cpu; + + check_mmu_stats(); + + register_cpu_notifier(&sysfs_cpu_nb); + + for_each_possible_cpu(cpu) { + struct cpu *c = &per_cpu(cpu_devices, cpu); + + register_cpu(c, cpu); + if (cpu_online(cpu)) + register_cpu_online(cpu); + } + + return 0; +} + +subsys_initcall(topology_init); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index af625147df6..4733f009c7c 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -3933,10 +3933,13 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) /* set up set-features taskfile */ DPRINTK("set features - xfer mode\n"); + /* Some controllers and ATAPI devices show flaky interrupt + * behavior after setting xfer mode. Use polling instead. + */ ata_tf_init(dev, &tf); tf.command = ATA_CMD_SET_FEATURES; tf.feature = SETFEATURES_XFER; - tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; tf.protocol = ATA_PROT_NODATA; tf.nsect = dev->xfer_mode; @@ -5414,14 +5417,6 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) } } - /* Some controllers show flaky interrupt behavior after - * setting xfer mode. Use polling instead. 
- */ - if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES && - qc->tf.feature == SETFEATURES_XFER) && - (ap->flags & ATA_FLAG_SETXFER_POLLING)) - qc->tf.flags |= ATA_TFLAG_POLLING; - /* select the device */ ata_dev_select(ap, qc->dev->devno, 1, 0); diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index a8462f1e890..63eca299c62 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -452,7 +452,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) /* Early VIA without UDMA support */ static const struct ata_port_info via_mwdma_info = { .sht = &via_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING, + .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, .mwdma_mask = 0x07, .port_ops = &via_port_ops @@ -460,7 +460,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) /* Ditto with IRQ masking required */ static const struct ata_port_info via_mwdma_info_borked = { .sht = &via_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING, + .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, .mwdma_mask = 0x07, .port_ops = &via_port_ops_noirq, @@ -468,7 +468,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) /* VIA UDMA 33 devices (and borked 66) */ static const struct ata_port_info via_udma33_info = { .sht = &via_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING, + .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, .mwdma_mask = 0x07, .udma_mask = 0x7, @@ -477,7 +477,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) /* VIA UDMA 66 devices */ static const struct ata_port_info via_udma66_info = { .sht = &via_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING, + .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, .mwdma_mask = 0x07, .udma_mask = 0x1f, @@ -486,7 +486,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) /* VIA UDMA 100 devices */ static const struct ata_port_info via_udma100_info = { .sht = &via_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING, + .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, .mwdma_mask = 0x07, .udma_mask = 0x3f, @@ -495,7 +495,7 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id) /* UDMA133 with bad AST (All current 133) */ static const struct ata_port_info via_udma133_info = { .sht = &via_sht, - .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SETXFER_POLLING, + .flags = ATA_FLAG_SLAVE_POSS, .pio_mask = 0x1f, .mwdma_mask = 0x07, .udma_mask = 0x7f, /* FIXME: should check north bridge */ diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index 9c67df5ccfa..7f6d02ce1b5 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -1475,6 +1475,7 @@ static void top_off_fp (struct fs_dev *dev, struct freepool *fp, struct FS_BPENTRY *qe, *ne; struct sk_buff *skb; int n = 0; + u32 qe_tmp; fs_dprintk (FS_DEBUG_QUEUE, "Topping off queue at %x (%d-%d/%d)\n", fp->offset, read_fs (dev, FP_CNT (fp->offset)), fp->n, @@ -1502,10 +1503,16 @@ static void top_off_fp (struct fs_dev *dev, struct freepool *fp, ne->skb = skb; ne->fp = fp; - qe = (struct FS_BPENTRY *) (read_fs (dev, FP_EA(fp->offset))); - fs_dprintk (FS_DEBUG_QUEUE, "link at %p\n", qe); - if (qe) { - qe = bus_to_virt ((long) qe); + /* + * FIXME: following code encodes and decodes + * machine pointers (could be 64-bit) into a + * 32-bit register. 
+ */ + + qe_tmp = read_fs (dev, FP_EA(fp->offset)); + fs_dprintk (FS_DEBUG_QUEUE, "link at %x\n", qe_tmp); + if (qe_tmp) { + qe = bus_to_virt ((long) qe_tmp); qe->next = virt_to_bus(ne); qe->flags &= ~FP_FLAGS_EPI; } else diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index b234729706b..be6b93c20f6 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -699,9 +699,9 @@ static void evdev_disconnect(struct input_handle *handle) if (evdev->open) { input_flush_device(handle, NULL); input_close_device(handle); - wake_up_interruptible(&evdev->wait); list_for_each_entry(client, &evdev->client_list, node) kill_fasync(&client->fasync, SIGIO, POLL_HUP); + wake_up_interruptible(&evdev->wait); } else evdev_free(evdev); } diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 06f0541b24d..10e3b7bc925 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c @@ -594,9 +594,9 @@ static void joydev_disconnect(struct input_handle *handle) if (joydev->open) { input_close_device(handle); - wake_up_interruptible(&joydev->wait); list_for_each_entry(client, &joydev->client_list, node) kill_fasync(&client->fasync, SIGIO, POLL_HUP); + wake_up_interruptible(&joydev->wait); } else joydev_free(joydev); } diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c index 86ad1027e12..b069ee18e35 100644 --- a/drivers/input/joystick/db9.c +++ b/drivers/input/joystick/db9.c @@ -54,7 +54,7 @@ static struct db9_config db9_cfg[DB9_MAX_PORTS] __initdata; module_param_array_named(dev, db9_cfg[0].args, int, &db9_cfg[0].nargs, 0); MODULE_PARM_DESC(dev, "Describes first attached device (<parport#>,<type>)"); -module_param_array_named(dev2, db9_cfg[1].args, int, &db9_cfg[0].nargs, 0); +module_param_array_named(dev2, db9_cfg[1].args, int, &db9_cfg[1].nargs, 0); MODULE_PARM_DESC(dev2, "Describes second attached device (<parport#>,<type>)"); module_param_array_named(dev3, db9_cfg[2].args, int, &db9_cfg[2].nargs, 0); MODULE_PARM_DESC(dev3, "Describes third attached device (<parport#>,<type>)"); diff --git a/drivers/input/mouse/Kconfig b/drivers/input/mouse/Kconfig index eb0167e9f0c..50e06e8dd05 100644 --- a/drivers/input/mouse/Kconfig +++ b/drivers/input/mouse/Kconfig @@ -48,7 +48,7 @@ config MOUSE_PS2_ALPS If unsure, say Y. 
config MOUSE_PS2_LOGIPS2PP - bool "Logictech PS/2++ mouse protocol extension" if EMBEDDED + bool "Logitech PS/2++ mouse protocol extension" if EMBEDDED default y depends on MOUSE_PS2 help diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c index 8675f950939..3f4866d8d18 100644 --- a/drivers/input/mousedev.c +++ b/drivers/input/mousedev.c @@ -766,9 +766,9 @@ static void mousedev_disconnect(struct input_handle *handle) if (mousedev->open) { input_close_device(handle); - wake_up_interruptible(&mousedev->wait); list_for_each_entry(client, &mousedev->client_list, node) kill_fasync(&client->fasync, SIGIO, POLL_HUP); + wake_up_interruptible(&mousedev->wait); } else mousedev_free(mousedev); } diff --git a/drivers/input/tsdev.c b/drivers/input/tsdev.c index 8238b13874c..2db364898e1 100644 --- a/drivers/input/tsdev.c +++ b/drivers/input/tsdev.c @@ -476,9 +476,9 @@ static void tsdev_disconnect(struct input_handle *handle) if (tsdev->open) { input_close_device(handle); - wake_up_interruptible(&tsdev->wait); list_for_each_entry(client, &tsdev->client_list, node) kill_fasync(&client->fasync, SIGIO, POLL_HUP); + wake_up_interruptible(&tsdev->wait); } else tsdev_free(tsdev); } diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c index 7772bd1d92b..38e815a2e87 100644 --- a/drivers/mfd/ucb1x00-ts.c +++ b/drivers/mfd/ucb1x00-ts.c @@ -291,7 +291,7 @@ static void ucb1x00_ts_irq(int idx, void *id) static int ucb1x00_ts_open(struct input_dev *idev) { - struct ucb1x00_ts *ts = idev->private; + struct ucb1x00_ts *ts = input_get_drvdata(idev); int ret = 0; BUG_ON(ts->rtask); @@ -328,7 +328,7 @@ static int ucb1x00_ts_open(struct input_dev *idev) */ static void ucb1x00_ts_close(struct input_dev *idev) { - struct ucb1x00_ts *ts = idev->private; + struct ucb1x00_ts *ts = input_get_drvdata(idev); if (ts->rtask) kthread_stop(ts->rtask); @@ -380,7 +380,6 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev) ts->idev = idev; ts->adcsync = adcsync ? UCB_SYNC : UCB_NOSYNC; - idev->private = ts; idev->name = "Touchscreen panel"; idev->id.product = ts->ucb->id; idev->open = ucb1x00_ts_open; @@ -391,6 +390,8 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev) __set_bit(ABS_Y, idev->absbit); __set_bit(ABS_PRESSURE, idev->absbit); + input_set_drvdata(idev, ts); + err = input_register_device(idev); if (err) goto fail; diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index 92055405cb3..451adcc52b3 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile @@ -1,10 +1,9 @@ # # Makefile for the memory technology device drivers. # -# $Id: Makefile.common,v 1.7 2005/07/11 10:39:27 gleixner Exp $ # Core functionality. 
-mtd-y := mtdcore.o +mtd-y := mtdcore.o mtdsuper.o mtd-$(CONFIG_MTD_PARTITIONS) += mtdpart.o obj-$(CONFIG_MTD) += $(mtd-y) diff --git a/drivers/mtd/maps/uclinux.c b/drivers/mtd/maps/uclinux.c index 389fea28b9a..14ffb1a9302 100644 --- a/drivers/mtd/maps/uclinux.c +++ b/drivers/mtd/maps/uclinux.c @@ -16,7 +16,6 @@ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/major.h> -#include <linux/root_dev.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> @@ -89,10 +88,6 @@ int __init uclinux_mtd_init(void) uclinux_ram_mtdinfo = mtd; add_mtd_partitions(mtd, uclinux_romfs, NUM_PARTITIONS); - printk("uclinux[mtd]: set %s to be root filesystem\n", - uclinux_romfs[0].name); - ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, 0); - return(0); } diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c new file mode 100644 index 00000000000..aca33197120 --- /dev/null +++ b/drivers/mtd/mtdsuper.c @@ -0,0 +1,232 @@ +/* MTD-based superblock management + * + * Copyright © 2001-2007 Red Hat, Inc. All Rights Reserved. + * Written by: David Howells <dhowells@redhat.com> + * David Woodhouse <dwmw2@infradead.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include <linux/mtd/super.h> +#include <linux/namei.h> +#include <linux/ctype.h> + +/* + * compare superblocks to see if they're equivalent + * - they are if the underlying MTD device is the same + */ +static int get_sb_mtd_compare(struct super_block *sb, void *_mtd) +{ + struct mtd_info *mtd = _mtd; + + if (sb->s_mtd == mtd) { + DEBUG(2, "MTDSB: Match on device %d (\"%s\")\n", + mtd->index, mtd->name); + return 1; + } + + DEBUG(2, "MTDSB: No match, device %d (\"%s\"), device %d (\"%s\")\n", + sb->s_mtd->index, sb->s_mtd->name, mtd->index, mtd->name); + return 0; +} + +/* + * mark the superblock by the MTD device it is using + * - set the device number to be the correct MTD block device for pesuperstence + * of NFS exports + */ +static int get_sb_mtd_set(struct super_block *sb, void *_mtd) +{ + struct mtd_info *mtd = _mtd; + + sb->s_mtd = mtd; + sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, mtd->index); + return 0; +} + +/* + * get a superblock on an MTD-backed filesystem + */ +static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, + struct mtd_info *mtd, + int (*fill_super)(struct super_block *, void *, int), + struct vfsmount *mnt) +{ + struct super_block *sb; + int ret; + + sb = sget(fs_type, get_sb_mtd_compare, get_sb_mtd_set, mtd); + if (IS_ERR(sb)) + goto out_error; + + if (sb->s_root) + goto already_mounted; + + /* fresh new superblock */ + DEBUG(1, "MTDSB: New superblock for device %d (\"%s\")\n", + mtd->index, mtd->name); + + ret = fill_super(sb, data, flags & MS_SILENT ? 
1 : 0); + if (ret < 0) { + up_write(&sb->s_umount); + deactivate_super(sb); + return ret; + } + + /* go */ + sb->s_flags |= MS_ACTIVE; + return simple_set_mnt(mnt, sb); + + /* new mountpoint for an already mounted superblock */ +already_mounted: + DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n", + mtd->index, mtd->name); + ret = simple_set_mnt(mnt, sb); + goto out_put; + +out_error: + ret = PTR_ERR(sb); +out_put: + put_mtd_device(mtd); + return ret; +} + +/* + * get a superblock on an MTD-backed filesystem by MTD device number + */ +static int get_sb_mtd_nr(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, int mtdnr, + int (*fill_super)(struct super_block *, void *, int), + struct vfsmount *mnt) +{ + struct mtd_info *mtd; + + mtd = get_mtd_device(NULL, mtdnr); + if (IS_ERR(mtd)) { + DEBUG(0, "MTDSB: Device #%u doesn't appear to exist\n", mtdnr); + return PTR_ERR(mtd); + } + + return get_sb_mtd_aux(fs_type, flags, dev_name, data, mtd, fill_super, + mnt); +} + +/* + * set up an MTD-based superblock + */ +int get_sb_mtd(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, + int (*fill_super)(struct super_block *, void *, int), + struct vfsmount *mnt) +{ + struct nameidata nd; + int mtdnr, ret; + + if (!dev_name) + return -EINVAL; + + DEBUG(2, "MTDSB: dev_name \"%s\"\n", dev_name); + + /* the preferred way of mounting in future; especially when + * CONFIG_BLOCK=n - we specify the underlying MTD device by number or + * by name, so that we don't require block device support to be present + * in the kernel. */ + if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') { + if (dev_name[3] == ':') { + struct mtd_info *mtd; + + /* mount by MTD device name */ + DEBUG(1, "MTDSB: mtd:%%s, name \"%s\"\n", + dev_name + 4); + + for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) { + mtd = get_mtd_device(NULL, mtdnr); + if (!IS_ERR(mtd)) { + if (!strcmp(mtd->name, dev_name + 4)) + return get_sb_mtd_aux( + fs_type, flags, + dev_name, data, mtd, + fill_super, mnt); + + put_mtd_device(mtd); + } + } + + printk(KERN_NOTICE "MTD:" + " MTD device with name \"%s\" not found.\n", + dev_name + 4); + + } else if (isdigit(dev_name[3])) { + /* mount by MTD device number name */ + char *endptr; + + mtdnr = simple_strtoul(dev_name + 3, &endptr, 0); + if (!*endptr) { + /* It was a valid number */ + DEBUG(1, "MTDSB: mtd%%d, mtdnr %d\n", + mtdnr); + return get_sb_mtd_nr(fs_type, flags, + dev_name, data, + mtdnr, fill_super, mnt); + } + } + } + + /* try the old way - the hack where we allowed users to mount + * /dev/mtdblock$(n) but didn't actually _use_ the blockdev + */ + ret = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); + + DEBUG(1, "MTDSB: path_lookup() returned %d, inode %p\n", + ret, nd.dentry ? 
nd.dentry->d_inode : NULL); + + if (ret) + return ret; + + ret = -EINVAL; + + if (!S_ISBLK(nd.dentry->d_inode->i_mode)) + goto out; + + if (nd.mnt->mnt_flags & MNT_NODEV) { + ret = -EACCES; + goto out; + } + + if (imajor(nd.dentry->d_inode) != MTD_BLOCK_MAJOR) + goto not_an_MTD_device; + + mtdnr = iminor(nd.dentry->d_inode); + path_release(&nd); + + return get_sb_mtd_nr(fs_type, flags, dev_name, data, mtdnr, fill_super, + mnt); + +not_an_MTD_device: + if (!(flags & MS_SILENT)) + printk(KERN_NOTICE + "MTD: Attempt to mount non-MTD device \"%s\"\n", + dev_name); +out: + path_release(&nd); + return ret; + +} + +EXPORT_SYMBOL_GPL(get_sb_mtd); + +/* + * destroy an MTD-based superblock + */ +void kill_mtd_super(struct super_block *sb) +{ + generic_shutdown_super(sb); + put_mtd_device(sb->s_mtd); + sb->s_mtd = NULL; +} + +EXPORT_SYMBOL_GPL(kill_mtd_super); diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 023779a581f..2f3184184ad 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -64,8 +64,8 @@ #define DRV_MODULE_NAME "tg3" #define PFX DRV_MODULE_NAME ": " -#define DRV_MODULE_VERSION "3.76" -#define DRV_MODULE_RELDATE "May 5, 2007" +#define DRV_MODULE_VERSION "3.77" +#define DRV_MODULE_RELDATE "May 31, 2007" #define TG3_DEF_MAC_MODE 0 #define TG3_DEF_RX_MODE 0 @@ -10961,6 +10961,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) * upon subsystem IDs. */ if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL && + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) { tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT | TG3_FLAG_USE_LINKCHG_REG); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 572034ceb14..2b2f5c12019 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -1532,6 +1532,7 @@ source "drivers/scsi/arm/Kconfig" config JAZZ_ESP bool "MIPS JAZZ FAS216 SCSI support" depends on MACH_JAZZ && SCSI + select SCSI_SPI_ATTRS help This is the driver for the onboard SCSI host adapter of MIPS Magnum 4000, Acer PICA, Olivetti M700-10 and a few other identical OEM @@ -1756,6 +1757,7 @@ config SUN3X_ESP config SCSI_SUNESP tristate "Sparc ESP Scsi Driver" depends on SBUS && SCSI + select SCSI_SPI_ATTRS help This is the driver for the Sun ESP SCSI host adapter. The ESP chipset is present in most SPARC SBUS-based computers. 
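
The get_sb_mtd() and kill_mtd_super() helpers added in drivers/mtd/mtdsuper.c above are meant to be called from an MTD-backed filesystem's mount path; the prototypes come from <linux/mtd/super.h>, which mtdsuper.c itself includes. A minimal sketch of the wiring, assuming a hypothetical "examplefs" whose examplefs_fill_super() already exists (this mirrors the intended use, it is not code from the patch):

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/mtd/super.h>

	/* assumed to exist: fills in the superblock from the MTD device */
	extern int examplefs_fill_super(struct super_block *sb, void *data, int silent);

	static int examplefs_get_sb(struct file_system_type *fs_type, int flags,
				    const char *dev_name, void *data,
				    struct vfsmount *mnt)
	{
		/* hand everything off to the generic MTD superblock code */
		return get_sb_mtd(fs_type, flags, dev_name, data,
				  examplefs_fill_super, mnt);
	}

	static struct file_system_type examplefs_fs_type = {
		.owner		= THIS_MODULE,
		.name		= "examplefs",
		.get_sb		= examplefs_get_sb,
		.kill_sb	= kill_mtd_super,
	};

After a register_filesystem(&examplefs_fs_type), a user could mount with "mount -t examplefs mtd:<partition-name> /mnt" or with "mtd2", which is the CONFIG_BLOCK=n path the comment in get_sb_mtd() describes; mounting /dev/mtdblockN keeps working through the path_lookup() fallback.
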
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 350ea7feb61..5c487ff096c 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -863,6 +863,14 @@ static struct scsi_host_template aac_driver_template = { .emulated = 1, }; +static void __aac_shutdown(struct aac_dev * aac) +{ + kthread_stop(aac->thread); + aac_send_shutdown(aac); + aac_adapter_disable_int(aac); + free_irq(aac->pdev->irq, aac); +} + static int __devinit aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -1015,10 +1023,7 @@ static int __devinit aac_probe_one(struct pci_dev *pdev, return 0; out_deinit: - kthread_stop(aac->thread); - aac_send_shutdown(aac); - aac_adapter_disable_int(aac); - free_irq(pdev->irq, aac); + __aac_shutdown(aac); out_unmap: aac_fib_map_free(aac); pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); @@ -1038,7 +1043,8 @@ static void aac_shutdown(struct pci_dev *dev) { struct Scsi_Host *shost = pci_get_drvdata(dev); struct aac_dev *aac = (struct aac_dev *)shost->hostdata; - aac_send_shutdown(aac); + scsi_block_requests(shost); + __aac_shutdown(aac); } static void __devexit aac_remove_one(struct pci_dev *pdev) @@ -1048,16 +1054,12 @@ static void __devexit aac_remove_one(struct pci_dev *pdev) scsi_remove_host(shost); - kthread_stop(aac->thread); - - aac_send_shutdown(aac); - aac_adapter_disable_int(aac); + __aac_shutdown(aac); aac_fib_map_free(aac); pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys); kfree(aac->queues); - free_irq(pdev->irq, aac); aac_adapter_ioremap(aac, 0); kfree(aac->fibs); diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c index eff846ae0af..03dbe60c264 100644 --- a/drivers/scsi/atari_NCR5380.c +++ b/drivers/scsi/atari_NCR5380.c @@ -894,45 +894,6 @@ static int NCR5380_init(struct Scsi_Host *instance, int flags) } /* - * our own old-style timeout update - */ -/* - * The strategy is to cause the timer code to call scsi_times_out() - * when the soonest timeout is pending. - * The arguments are used when we are queueing a new command, because - * we do not want to subtract the time used from this time, but when we - * set the timer, we want to take this value into account. - */ - -int atari_scsi_update_timeout(Scsi_Cmnd * SCset, int timeout) -{ - int rtn; - - /* - * We are using the new error handling code to actually register/deregister - * timers for timeout. - */ - - if (!timer_pending(&SCset->eh_timeout)) - rtn = 0; - else - rtn = SCset->eh_timeout.expires - jiffies; - - if (timeout == 0) { - del_timer(&SCset->eh_timeout); - SCset->eh_timeout.data = (unsigned long)NULL; - SCset->eh_timeout.expires = 0; - } else { - if (SCset->eh_timeout.data != (unsigned long)NULL) - del_timer(&SCset->eh_timeout); - SCset->eh_timeout.data = (unsigned long)SCset; - SCset->eh_timeout.expires = jiffies + timeout; - add_timer(&SCset->eh_timeout); - } - return rtn; -} - -/* * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd, * void (*done)(Scsi_Cmnd *)) * @@ -956,7 +917,6 @@ static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) Scsi_Cmnd *tmp; int oldto; unsigned long flags; - // extern int update_timeout(Scsi_Cmnd * SCset, int timeout); #if (NDEBUG & NDEBUG_NO_WRITE) switch (cmd->cmnd[0]) { @@ -1029,9 +989,9 @@ static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) * alter queues and touch the lock. 
*/ if (!IS_A_TT()) { - oldto = atari_scsi_update_timeout(cmd, 0); + /* perhaps stop command timer here */ falcon_get_lock(); - atari_scsi_update_timeout(cmd, oldto); + /* perhaps restart command timer here */ } if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { LIST(cmd, hostdata->issue_queue); diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index dd076da86a4..b98136adaaa 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -2590,7 +2590,7 @@ qla2x00_down_timeout(struct semaphore *sema, unsigned long timeout) return 0; if (msleep_interruptible(step)) break; - } while (--iterations >= 0); + } while (--iterations > 0); return -ETIMEDOUT; } diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index a67f315244d..662577fbe7a 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -184,6 +184,15 @@ int scsi_complete_async_scans(void) /* Only exported for the benefit of scsi_wait_scan */ EXPORT_SYMBOL_GPL(scsi_complete_async_scans); +#ifndef MODULE +/* + * For async scanning we need to wait for all the scans to complete before + * trying to mount the root fs. Otherwise non-modular drivers may not be ready + * yet. + */ +late_initcall(scsi_complete_async_scans); +#endif + /** * scsi_unlock_floptical - unlock device via a special MODE SENSE command * @sdev: scsi device to send command to diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index c3219b29b5a..4831edbae2d 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -411,7 +411,7 @@ EXPORT_SYMBOL_GPL(spi_alloc_master); */ int spi_register_master(struct spi_master *master) { - static atomic_t dyn_bus_id = ATOMIC_INIT((1<<16) - 1); + static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1); struct device *dev = master->cdev.dev; int status = -ENODEV; int dynamic = 0; diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index 6e1f1ea21b3..403dac787eb 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -755,7 +755,7 @@ config FB_LEO config FB_IGA bool "IGA 168x display support" - depends on FB && SPARC32 + depends on (FB = y) && SPARC32 select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT @@ -765,7 +765,7 @@ config FB_IGA config FB_XVR500 bool "Sun XVR-500 3DLABS Wildcat support" - depends on FB && PCI && SPARC64 + depends on (FB = y) && PCI && SPARC64 select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT @@ -778,7 +778,7 @@ config FB_XVR500 config FB_XVR2500 bool "Sun XVR-2500 3DLABS Wildcat support" - depends on FB && PCI && SPARC64 + depends on (FB = y) && PCI && SPARC64 select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile index 9b26dda18a3..ac46cc3f6a2 100644 --- a/drivers/video/console/Makefile +++ b/drivers/video/console/Makefile @@ -47,7 +47,7 @@ targets := promcon_tbl.c quiet_cmd_conmakehash = CNMKHSH $@ cmd_conmakehash = scripts/conmakehash $< | \ sed -e '/\#include <[^>]*>/p' -e 's/types/init/' \ - -e 's/dfont\(_uni.*\]\)/promfont\1 __initdata/' > $@ + -e 's/dfont\(_uni.*\]\)/promfont\1 /' > $@ $(obj)/promcon_tbl.c: $(src)/prom.uni $(call cmd,conmakehash) diff --git a/drivers/video/ffb.c b/drivers/video/ffb.c index 1d4e8354b56..3f6c98fad43 100644 --- a/drivers/video/ffb.c +++ b/drivers/video/ffb.c @@ -656,7 +656,7 @@ static int ffb_setcolreg(unsigned regno, { u32 value; - if (regno >= 256) + if (regno >= 16) return 1; red >>= 8; @@ -903,7 +903,7 @@ ffb_init_fix(struct fb_info *info) struct 
all_info { struct fb_info info; struct ffb_par par; - u32 pseudo_palette[256]; + u32 pseudo_palette[16]; }; static int ffb_init_one(struct of_device *op) diff --git a/drivers/video/sunxvr2500.c b/drivers/video/sunxvr2500.c index 4316c7fe8e2..c3869a96ab5 100644 --- a/drivers/video/sunxvr2500.c +++ b/drivers/video/sunxvr2500.c @@ -28,7 +28,7 @@ struct s3d_info { unsigned int depth; unsigned int fb_size; - u32 pseudo_palette[256]; + u32 pseudo_palette[16]; }; static int __devinit s3d_get_props(struct s3d_info *sp) @@ -52,15 +52,14 @@ static int s3d_setcolreg(unsigned regno, { u32 value; - if (regno >= 256) - return 1; + if (regno < 16) { + red >>= 8; + green >>= 8; + blue >>= 8; - red >>= 8; - green >>= 8; - blue >>= 8; - - value = (blue << 24) | (green << 16) | (red << 8); - ((u32 *)info->pseudo_palette)[regno] = value; + value = (blue << 24) | (green << 16) | (red << 8); + ((u32 *)info->pseudo_palette)[regno] = value; + } return 0; } diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c index 08880a62bfa..71bf3f1f00b 100644 --- a/drivers/video/sunxvr500.c +++ b/drivers/video/sunxvr500.c @@ -50,7 +50,7 @@ struct e3d_info { u32 fb8_0_off; u32 fb8_1_off; - u32 pseudo_palette[256]; + u32 pseudo_palette[16]; }; static int __devinit e3d_get_props(struct e3d_info *ep) @@ -126,7 +126,9 @@ static int e3d_setcolreg(unsigned regno, blue_8 = blue >> 8; value = (blue_8 << 24) | (green_8 << 16) | (red_8 << 8); - ((u32 *)info->pseudo_palette)[regno] = value; + + if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 16) + ((u32 *)info->pseudo_palette)[regno] = value; red_10 = red >> 6; diff --git a/fs/ioctl.c b/fs/ioctl.c index 479c1038ed4..8c90cbc903f 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -12,6 +12,7 @@ #include <linux/fs.h> #include <linux/security.h> #include <linux/module.h> +#include <linux/kallsyms.h> #include <asm/uaccess.h> #include <asm/ioctls.h> @@ -20,6 +21,7 @@ static long do_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int error = -ENOTTY; + void *f; if (!filp->f_op) goto out; @@ -29,10 +31,16 @@ static long do_ioctl(struct file *filp, unsigned int cmd, if (error == -ENOIOCTLCMD) error = -EINVAL; goto out; - } else if (filp->f_op->ioctl) { + } else if ((f = filp->f_op->ioctl)) { lock_kernel(); - error = filp->f_op->ioctl(filp->f_path.dentry->d_inode, - filp, cmd, arg); + if (!filp->f_op->ioctl) { + printk("%s: ioctl %p disappeared\n", __FUNCTION__, f); + print_symbol("symbol: %s\n", (unsigned long)f); + dump_stack(); + } else { + error = filp->f_op->ioctl(filp->f_path.dentry->d_inode, + filp, cmd, arg); + } unlock_kernel(); } diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index 4884d5edfe6..12e83f67eee 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c @@ -229,9 +229,16 @@ static int jffs2_add_tn_to_tree(struct jffs2_sb_info *c, check anyway. 
*/ if (!tn->fn->size) { if (rii->mdata_tn) { - /* We had a candidate mdata node already */ - dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version); - jffs2_kill_tn(c, rii->mdata_tn); + if (rii->mdata_tn->version < tn->version) { + /* We had a candidate mdata node already */ + dbg_readinode("kill old mdata with ver %d\n", rii->mdata_tn->version); + jffs2_kill_tn(c, rii->mdata_tn); + } else { + dbg_readinode("kill new mdata with ver %d (older than existing %d\n", + tn->version, rii->mdata_tn->version); + jffs2_kill_tn(c, tn); + return 0; + } } rii->mdata_tn = tn; dbg_readinode("keep new mdata with ver %d\n", tn->version); @@ -1044,7 +1051,8 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf case JFFS2_NODETYPE_DIRENT: - if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent)) { + if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_dirent) && + len < sizeof(struct jffs2_raw_dirent)) { err = read_more(c, ref, sizeof(struct jffs2_raw_dirent), &len, buf); if (unlikely(err)) goto free_out; @@ -1058,7 +1066,8 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf case JFFS2_NODETYPE_INODE: - if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode)) { + if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_raw_inode) && + len < sizeof(struct jffs2_raw_inode)) { err = read_more(c, ref, sizeof(struct jffs2_raw_inode), &len, buf); if (unlikely(err)) goto free_out; @@ -1071,7 +1080,8 @@ static int jffs2_get_inode_nodes(struct jffs2_sb_info *c, struct jffs2_inode_inf break; default: - if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node)) { + if (JFFS2_MIN_NODE_HEADER < sizeof(struct jffs2_unknown_node) && + len < sizeof(struct jffs2_unknown_node)) { err = read_more(c, ref, sizeof(struct jffs2_unknown_node), &len, buf); if (unlikely(err)) goto free_out; diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index 6488af43bc9..e220d3bd610 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -19,7 +19,7 @@ #include <linux/mount.h> #include <linux/jffs2.h> #include <linux/pagemap.h> -#include <linux/mtd/mtd.h> +#include <linux/mtd/super.h> #include <linux/ctype.h> #include <linux/namei.h> #include "compr.h" @@ -75,69 +75,27 @@ static const struct super_operations jffs2_super_operations = .sync_fs = jffs2_sync_fs, }; -static int jffs2_sb_compare(struct super_block *sb, void *data) -{ - struct jffs2_sb_info *p = data; - struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); - - /* The superblocks are considered to be equivalent if the underlying MTD - device is the same one */ - if (c->mtd == p->mtd) { - D1(printk(KERN_DEBUG "jffs2_sb_compare: match on device %d (\"%s\")\n", p->mtd->index, p->mtd->name)); - return 1; - } else { - D1(printk(KERN_DEBUG "jffs2_sb_compare: No match, device %d (\"%s\"), device %d (\"%s\")\n", - c->mtd->index, c->mtd->name, p->mtd->index, p->mtd->name)); - return 0; - } -} - -static int jffs2_sb_set(struct super_block *sb, void *data) -{ - struct jffs2_sb_info *p = data; - - /* For persistence of NFS exports etc. 
we use the same s_dev - each time we mount the device, don't just use an anonymous - device */ - sb->s_fs_info = p; - p->os_priv = sb; - sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, p->mtd->index); - - return 0; -} - -static int jffs2_get_sb_mtd(struct file_system_type *fs_type, - int flags, const char *dev_name, - void *data, struct mtd_info *mtd, - struct vfsmount *mnt) +/* + * fill in the superblock + */ +static int jffs2_fill_super(struct super_block *sb, void *data, int silent) { - struct super_block *sb; struct jffs2_sb_info *c; - int ret; + + D1(printk(KERN_DEBUG "jffs2_get_sb_mtd():" + " New superblock for device %d (\"%s\")\n", + sb->s_mtd->index, sb->s_mtd->name)); c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) return -ENOMEM; - c->mtd = mtd; - - sb = sget(fs_type, jffs2_sb_compare, jffs2_sb_set, c); - - if (IS_ERR(sb)) - goto out_error; - - if (sb->s_root) { - /* New mountpoint for JFFS2 which is already mounted */ - D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): Device %d (\"%s\") is already mounted\n", - mtd->index, mtd->name)); - ret = simple_set_mnt(mnt, sb); - goto out_put; - } - D1(printk(KERN_DEBUG "jffs2_get_sb_mtd(): New superblock for device %d (\"%s\")\n", - mtd->index, mtd->name)); + c->mtd = sb->s_mtd; + c->os_priv = sb; + sb->s_fs_info = c; - /* Initialize JFFS2 superblock locks, the further initialization will be - * done later */ + /* Initialize JFFS2 superblock locks, the further initialization will + * be done later */ init_MUTEX(&c->alloc_sem); init_MUTEX(&c->erase_free_sem); init_waitqueue_head(&c->erase_wait); @@ -146,133 +104,20 @@ static int jffs2_get_sb_mtd(struct file_system_type *fs_type, spin_lock_init(&c->inocache_lock); sb->s_op = &jffs2_super_operations; - sb->s_flags = flags | MS_NOATIME; + sb->s_flags = sb->s_flags | MS_NOATIME; sb->s_xattr = jffs2_xattr_handlers; #ifdef CONFIG_JFFS2_FS_POSIX_ACL sb->s_flags |= MS_POSIXACL; #endif - ret = jffs2_do_fill_super(sb, data, flags & MS_SILENT ? 1 : 0); - - if (ret) { - /* Failure case... */ - up_write(&sb->s_umount); - deactivate_super(sb); - return ret; - } - - sb->s_flags |= MS_ACTIVE; - return simple_set_mnt(mnt, sb); - -out_error: - ret = PTR_ERR(sb); - out_put: - kfree(c); - put_mtd_device(mtd); - - return ret; -} - -static int jffs2_get_sb_mtdnr(struct file_system_type *fs_type, - int flags, const char *dev_name, - void *data, int mtdnr, - struct vfsmount *mnt) -{ - struct mtd_info *mtd; - - mtd = get_mtd_device(NULL, mtdnr); - if (IS_ERR(mtd)) { - D1(printk(KERN_DEBUG "jffs2: MTD device #%u doesn't appear to exist\n", mtdnr)); - return PTR_ERR(mtd); - } - - return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd, mnt); + return jffs2_do_fill_super(sb, data, silent); } static int jffs2_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { - int err; - struct nameidata nd; - int mtdnr; - - if (!dev_name) - return -EINVAL; - - D1(printk(KERN_DEBUG "jffs2_get_sb(): dev_name \"%s\"\n", dev_name)); - - /* The preferred way of mounting in future; especially when - CONFIG_BLK_DEV is implemented - we specify the underlying - MTD device by number or by name, so that we don't require - block device support to be present in the kernel. */ - - /* FIXME: How to do the root fs this way? 
*/ - - if (dev_name[0] == 'm' && dev_name[1] == 't' && dev_name[2] == 'd') { - /* Probably mounting without the blkdev crap */ - if (dev_name[3] == ':') { - struct mtd_info *mtd; - - /* Mount by MTD device name */ - D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd:%%s, name \"%s\"\n", dev_name+4)); - for (mtdnr = 0; mtdnr < MAX_MTD_DEVICES; mtdnr++) { - mtd = get_mtd_device(NULL, mtdnr); - if (!IS_ERR(mtd)) { - if (!strcmp(mtd->name, dev_name+4)) - return jffs2_get_sb_mtd(fs_type, flags, dev_name, data, mtd, mnt); - put_mtd_device(mtd); - } - } - printk(KERN_NOTICE "jffs2_get_sb(): MTD device with name \"%s\" not found.\n", dev_name+4); - } else if (isdigit(dev_name[3])) { - /* Mount by MTD device number name */ - char *endptr; - - mtdnr = simple_strtoul(dev_name+3, &endptr, 0); - if (!*endptr) { - /* It was a valid number */ - D1(printk(KERN_DEBUG "jffs2_get_sb(): mtd%%d, mtdnr %d\n", mtdnr)); - return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr, mnt); - } - } - } - - /* Try the old way - the hack where we allowed users to mount - /dev/mtdblock$(n) but didn't actually _use_ the blkdev */ - - err = path_lookup(dev_name, LOOKUP_FOLLOW, &nd); - - D1(printk(KERN_DEBUG "jffs2_get_sb(): path_lookup() returned %d, inode %p\n", - err, nd.dentry->d_inode)); - - if (err) - return err; - - err = -EINVAL; - - if (!S_ISBLK(nd.dentry->d_inode->i_mode)) - goto out; - - if (nd.mnt->mnt_flags & MNT_NODEV) { - err = -EACCES; - goto out; - } - - if (imajor(nd.dentry->d_inode) != MTD_BLOCK_MAJOR) { - if (!(flags & MS_SILENT)) - printk(KERN_NOTICE "Attempt to mount non-MTD device \"%s\" as JFFS2\n", - dev_name); - goto out; - } - - mtdnr = iminor(nd.dentry->d_inode); - path_release(&nd); - - return jffs2_get_sb_mtdnr(fs_type, flags, dev_name, data, mtdnr, mnt); - -out: - path_release(&nd); - return err; + return get_sb_mtd(fs_type, flags, dev_name, data, jffs2_fill_super, + mnt); } static void jffs2_put_super (struct super_block *sb) @@ -307,8 +152,7 @@ static void jffs2_kill_sb(struct super_block *sb) struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); if (!(sb->s_flags & MS_RDONLY)) jffs2_stop_garbage_collect_thread(c); - generic_shutdown_super(sb); - put_mtd_device(c->mtd); + kill_mtd_super(sb); kfree(c); } diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c index 78fc08893a6..e48665984cb 100644 --- a/fs/jffs2/xattr.c +++ b/fs/jffs2/xattr.c @@ -754,6 +754,10 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c) list_del(&xd->xindex); jffs2_free_xattr_datum(xd); } + list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) { + list_del(&xd->xindex); + jffs2_free_xattr_datum(xd); + } } #define XREF_TMPHASH_SIZE (128) @@ -825,7 +829,7 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c) ref->xd and ref->ic are not valid yet. 
*/ xd = jffs2_find_xattr_datum(c, ref->xid); ic = jffs2_get_ino_cache(c, ref->ino); - if (!xd || !ic) { + if (!xd || !ic || !ic->nlink) { dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) is orphan.\n", ref->ino, ref->xid, ref->xseqno); ref->xseqno |= XREF_DELETE_MARKER; diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index 03c385de761..445026fbec3 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -31,7 +31,7 @@ typedef struct { unsigned int ecache_size; unsigned int ecache_line_size; int core_id; - unsigned int __pad3; + int proc_id; } cpuinfo_sparc; DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); diff --git a/include/asm-sparc64/dma-mapping.h b/include/asm-sparc64/dma-mapping.h index 9329429fb7f..4e21c2f3065 100644 --- a/include/asm-sparc64/dma-mapping.h +++ b/include/asm-sparc64/dma-mapping.h @@ -162,6 +162,22 @@ dma_mapping_error(dma_addr_t dma_addr) #else struct device; +struct page; +struct scatterlist; + +static inline int +dma_supported(struct device *dev, u64 mask) +{ + BUG(); + return 0; +} + +static inline int +dma_set_mask(struct device *dev, u64 dma_mask) +{ + BUG(); + return 0; +} static inline void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) @@ -176,6 +192,52 @@ static inline void dma_free_coherent(struct device *dev, size_t size, BUG(); } +static inline dma_addr_t +dma_map_single(struct device *dev, void *cpu_addr, size_t size, + enum dma_data_direction direction) +{ + BUG(); + return 0; +} + +static inline void +dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline dma_addr_t +dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction direction) +{ + BUG(); + return 0; +} + +static inline void +dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline int +dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction direction) +{ + BUG(); + return 0; +} + +static inline void +dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries, + enum dma_data_direction direction) +{ + BUG(); +} + static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) @@ -190,6 +252,27 @@ dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t siz BUG(); } +static inline void +dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline void +dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, + enum dma_data_direction direction) +{ + BUG(); +} + +static inline int +dma_mapping_error(dma_addr_t dma_addr) +{ + BUG(); + return 0; +} + #endif /* PCI */ diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h index 4a43075a061..5c2f9d4b9f0 100644 --- a/include/asm-sparc64/hypervisor.h +++ b/include/asm-sparc64/hypervisor.h @@ -2798,6 +2798,11 @@ struct hv_mmu_statistics { */ #define HV_FAST_MMUSTAT_INFO 0x103 +#ifndef __ASSEMBLY__ +extern unsigned long sun4v_mmustat_conf(unsigned long ra, unsigned long *orig_ra); +extern unsigned long sun4v_mmustat_info(unsigned long *ra); +#endif + /* NCS crypto services */ /* ncs_request() sub-function numbers */ diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h 
index f76e1492add..4fb8c4bfb84 100644 --- a/include/asm-sparc64/smp.h +++ b/include/asm-sparc64/smp.h @@ -33,6 +33,8 @@ extern cpumask_t phys_cpu_present_map; #define cpu_possible_map phys_cpu_present_map extern cpumask_t cpu_sibling_map[NR_CPUS]; +extern cpumask_t cpu_core_map[NR_CPUS]; +extern int sparc64_multi_core; /* * General functions that each host system must provide. diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h index e0d450d600e..290ac75f385 100644 --- a/include/asm-sparc64/topology.h +++ b/include/asm-sparc64/topology.h @@ -1,12 +1,17 @@ #ifndef _ASM_SPARC64_TOPOLOGY_H #define _ASM_SPARC64_TOPOLOGY_H -#include <asm/spitfire.h> -#define smt_capable() (tlb_type == hypervisor) +#ifdef CONFIG_SMP +#define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id) +#define topology_core_id(cpu) (cpu_data(cpu).core_id) +#define topology_core_siblings(cpu) (cpu_core_map[cpu]) +#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) +#define mc_capable() (sparc64_multi_core) +#define smt_capable() (sparc64_multi_core) +#endif /* CONFIG_SMP */ #include <asm-generic/topology.h> -#define topology_core_id(cpu) (cpu_data(cpu).core_id) -#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu]) +#define cpu_coregroup_map(cpu) (cpu_core_map[cpu]) #endif /* _ASM_SPARC64_TOPOLOGY_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 7cf0c54a46a..b3ae77cccbb 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -938,6 +938,7 @@ struct super_block { struct list_head s_files; struct block_device *s_bdev; + struct mtd_info *s_mtd; struct list_head s_instances; struct quota_info s_dquot; /* Diskquota specific options */ diff --git a/include/linux/libata.h b/include/linux/libata.h index 85f7b1bd148..a6a3113120a 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -171,7 +171,6 @@ enum { ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H * Register FIS clearing BSY */ ATA_FLAG_DEBUGMSG = (1 << 13), - ATA_FLAG_SETXFER_POLLING= (1 << 14), /* use polling for SETXFER */ ATA_FLAG_IGN_SIMPLEX = (1 << 15), /* ignore SIMPLEX */ ATA_FLAG_NO_IORDY = (1 << 16), /* controller lacks iordy */ ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */ diff --git a/include/linux/mtd/super.h b/include/linux/mtd/super.h new file mode 100644 index 00000000000..4016dd6fe33 --- /dev/null +++ b/include/linux/mtd/super.h @@ -0,0 +1,30 @@ +/* MTD-based superblock handling + * + * Copyright © 2006 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#ifndef __MTD_SUPER_H__ +#define __MTD_SUPER_H__ + +#ifdef __KERNEL__ + +#include <linux/mtd/mtd.h> +#include <linux/fs.h> +#include <linux/mount.h> + +extern int get_sb_mtd(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data, + int (*fill_super)(struct super_block *, void *, int), + struct vfsmount *mnt); +extern void kill_mtd_super(struct super_block *sb); + + +#endif /* __KERNEL__ */ + +#endif /* __MTD_SUPER_H__ */ diff --git a/include/net/af_unix.h b/include/net/af_unix.h index c0398f5a8cb..65f49fd7def 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -62,13 +62,11 @@ struct unix_skb_parms { #define UNIXCREDS(skb) (&UNIXCB((skb)).creds) #define UNIXSID(skb) (&UNIXCB((skb)).secid) -#define unix_state_rlock(s) spin_lock(&unix_sk(s)->lock) -#define unix_state_runlock(s) spin_unlock(&unix_sk(s)->lock) -#define unix_state_wlock(s) spin_lock(&unix_sk(s)->lock) -#define unix_state_wlock_nested(s) \ +#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) +#define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock) +#define unix_state_lock_nested(s) \ spin_lock_nested(&unix_sk(s)->lock, \ SINGLE_DEPTH_NESTING) -#define unix_state_wunlock(s) spin_unlock(&unix_sk(s)->lock) #ifdef __KERNEL__ /* The AF_UNIX socket */ diff --git a/kernel/signal.c b/kernel/signal.c index acdfc0549c6..fe590e00db8 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -105,7 +105,11 @@ static int recalc_sigpending_tsk(struct task_struct *t) set_tsk_thread_flag(t, TIF_SIGPENDING); return 1; } - clear_tsk_thread_flag(t, TIF_SIGPENDING); + /* + * We must never clear the flag in another thread, or in current + * when it's possible the current syscall is returning -ERESTART*. + * So we don't clear it here, and only callers who know they should do. + */ return 0; } @@ -121,7 +125,9 @@ void recalc_sigpending_and_wake(struct task_struct *t) void recalc_sigpending(void) { - recalc_sigpending_tsk(current); + if (!recalc_sigpending_tsk(current)) + clear_thread_flag(TIF_SIGPENDING); + } /* Given the mask, find the first available signal that should be serviced. */ @@ -385,7 +391,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) } } } - recalc_sigpending_tsk(tsk); + if (likely(tsk == current)) + recalc_sigpending(); if (signr && unlikely(sig_kernel_stop(signr))) { /* * Set a marker that we have dequeued a stop signal. Our @@ -1580,8 +1587,9 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info) /* * Queued signals ignored us while we were stopped for tracing. * So check for any that we should take before resuming user mode. + * This sets TIF_SIGPENDING, but never clears it. 
*/ - recalc_sigpending(); + recalc_sigpending_tsk(current); } void ptrace_notify(int exit_code) diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index ceef57c9ab3..de78c9dd713 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -736,8 +736,7 @@ static int vlan_ioctl_handler(void __user *arg) case SET_VLAN_NAME_TYPE_CMD: if (!capable(CAP_NET_ADMIN)) return -EPERM; - if ((args.u.name_type >= 0) && - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) { vlan_name_type = args.u.name_type; err = 0; } else { diff --git a/net/core/sock.c b/net/core/sock.c index 7e51d3a5e4f..c14ce0198d2 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -998,7 +998,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) __sk_dst_set(sk, dst); sk->sk_route_caps = dst->dev->features; if (sk->sk_route_caps & NETIF_F_GSO) - sk->sk_route_caps |= NETIF_F_GSO_MASK; + sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE; if (sk_can_gso(sk)) { if (dst->header_len) sk->sk_route_caps &= ~NETIF_F_GSO_MASK; diff --git a/net/dccp/probe.c b/net/dccp/probe.c index 1f5e3ba6206..43a3adb027e 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c @@ -128,7 +128,7 @@ static ssize_t dccpprobe_read(struct file *file, char __user *buf, int error = 0, cnt = 0; unsigned char *tbuf; - if (!buf || len < 0) + if (!buf) return -EINVAL; if (len == 0) diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index dd02a45d0f6..0301dd468cf 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -50,8 +50,12 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) RT_CONN_FLAGS(sk), oif, sk->sk_protocol, inet->sport, usin->sin_port, sk, 1); - if (err) + if (err) { + if (err == -ENETUNREACH) + IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); return err; + } + if ((rt->rt_flags & RTCF_BROADCAST) && !sock_flag(sk, SOCK_BROADCAST)) { ip_rt_put(rt); return -EACCES; diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index e238b17f554..02a899bec19 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -514,12 +514,15 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) saddr = iph->daddr; if (!(rt->rt_flags & RTCF_LOCAL)) { - /* This is broken, skb_in->dev points to the outgoing device - * after the packet passes through ip_output(). - */ - if (skb_in->dev && sysctl_icmp_errors_use_inbound_ifaddr) - saddr = inet_select_addr(skb_in->dev, 0, RT_SCOPE_LINK); - else + struct net_device *dev = NULL; + + if (rt->fl.iif && sysctl_icmp_errors_use_inbound_ifaddr) + dev = dev_get_by_index(rt->fl.iif); + + if (dev) { + saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK); + dev_put(dev); + } else saddr = 0; } diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 43fb1600f1f..fbe7714f21d 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -31,10 +31,8 @@ EXPORT_SYMBOL(inet_csk_timer_bug_msg); /* * This array holds the first and last local port number. 
- * For high-usage systems, use sysctl to change this to - * 32768-61000 */ -int sysctl_local_port_range[2] = { 1024, 4999 }; +int sysctl_local_port_range[2] = { 32768, 61000 }; int inet_csk_bind_conflict(const struct sock *sk, const struct inet_bind_bucket *tb) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 766314505c0..cd3c7e95de9 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2464,13 +2464,10 @@ void __init tcp_init(void) order++) ; if (order >= 4) { - sysctl_local_port_range[0] = 32768; - sysctl_local_port_range[1] = 61000; tcp_death_row.sysctl_max_tw_buckets = 180000; sysctl_tcp_max_orphans = 4096 << (order - 4); sysctl_max_syn_backlog = 1024; } else if (order < 3) { - sysctl_local_port_range[0] = 1024 * (3 - order); tcp_death_row.sysctl_max_tw_buckets >>= (3 - order); sysctl_tcp_max_orphans >>= (3 - order); sysctl_max_syn_backlog = 128; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 38cb25b48bf..74683d81c3f 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -2407,8 +2407,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) struct sk_buff *skb; __u32 now = tcp_time_stamp; int acked = 0; + int prior_packets = tp->packets_out; __s32 seq_rtt = -1; - u32 pkts_acked = 0; ktime_t last_ackt = ktime_set(0,0); while ((skb = tcp_write_queue_head(sk)) && @@ -2437,7 +2437,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) */ if (!(scb->flags & TCPCB_FLAG_SYN)) { acked |= FLAG_DATA_ACKED; - ++pkts_acked; } else { acked |= FLAG_SYN_ACKED; tp->retrans_stamp = 0; @@ -2481,6 +2480,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p) } if (acked&FLAG_ACKED) { + u32 pkts_acked = prior_packets - tp->packets_out; const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5a3e7f839fc..47c61055eb6 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -192,8 +192,11 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, IPPROTO_TCP, inet->sport, usin->sin_port, sk, 1); - if (tmp < 0) + if (tmp < 0) { + if (tmp == -ENETUNREACH) + IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); return tmp; + } if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { ip_rt_put(rt); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 4c7e95fa090..5da703e699d 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -722,8 +722,11 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, .dport = dport } } }; security_sk_classify_flow(sk, &fl); err = ip_route_output_flow(&rt, &fl, sk, 1); - if (err) + if (err) { + if (err == -ENETUNREACH) + IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); goto out; + } err = -EACCES; if ((rt->rt_flags & RTCF_BROADCAST) && diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c index 6d2a0820511..dc442fb791b 100644 --- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c @@ -177,8 +177,7 @@ static unsigned int ipv6_confirm(unsigned int hooknum, protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum, (*pskb)->len - extoff); - if (protoff < 0 || protoff > (*pskb)->len || - pnum == NEXTHDR_FRAGMENT) { + if (protoff > (*pskb)->len || pnum == NEXTHDR_FRAGMENT) { DEBUGP("proto header not found\n"); return NF_ACCEPT; } diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c index 0be790d250f..8814b95b232 100644 --- 
a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c +++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c @@ -168,8 +168,7 @@ icmpv6_error_message(struct sk_buff *skb, skb->len - inip6off - sizeof(struct ipv6hdr)); - if ((inprotoff < 0) || (inprotoff > skb->len) || - (inprotonum == NEXTHDR_FRAGMENT)) { + if ((inprotoff > skb->len) || (inprotonum == NEXTHDR_FRAGMENT)) { DEBUGP("icmpv6_error: Can't get protocol header in ICMPv6 payload.\n"); return -NF_ACCEPT; } diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 45b3cda86a2..6f8684b5617 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -164,8 +164,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a, printk("offset must be on 32 bit boundaries\n"); goto bad; } - if (skb->len < 0 || - (offset > 0 && offset > skb->len)) { + if (offset > 0 && offset > skb->len) { printk("offset %d cant exceed pkt length %d\n", offset, skb->len); goto bad; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index cbefe225581..f4d34480a09 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -224,7 +224,8 @@ void __netdev_watchdog_up(struct net_device *dev) if (dev->tx_timeout) { if (dev->watchdog_timeo <= 0) dev->watchdog_timeo = 5*HZ; - if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo)) + if (!mod_timer(&dev->watchdog_timer, + round_jiffies(jiffies + dev->watchdog_timeo))) dev_hold(dev); } } diff --git a/net/sctp/debug.c b/net/sctp/debug.c index e8c0f7435d7..80f70aa5338 100644 --- a/net/sctp/debug.c +++ b/net/sctp/debug.c @@ -77,8 +77,6 @@ static const char *sctp_cid_tbl[SCTP_NUM_BASE_CHUNK_TYPES] = { /* Lookup "chunk type" debug name. */ const char *sctp_cname(const sctp_subtype_t cid) { - if (cid.chunk < 0) - return "illegal chunk id"; if (cid.chunk <= SCTP_CID_BASE_MAX) return sctp_cid_tbl[cid.chunk]; @@ -146,8 +144,6 @@ static const char *sctp_primitive_tbl[SCTP_NUM_PRIMITIVE_TYPES] = { /* Lookup primitive debug name. */ const char *sctp_pname(const sctp_subtype_t id) { - if (id.primitive < 0) - return "illegal primitive"; if (id.primitive <= SCTP_EVENT_PRIMITIVE_MAX) return sctp_primitive_tbl[id.primitive]; return "unknown_primitive"; @@ -161,8 +157,6 @@ static const char *sctp_other_tbl[] = { /* Lookup "other" debug name. */ const char *sctp_oname(const sctp_subtype_t id) { - if (id.other < 0) - return "illegal 'other' event"; if (id.other <= SCTP_EVENT_OTHER_MAX) return sctp_other_tbl[id.other]; return "unknown 'other' event"; @@ -184,8 +178,6 @@ static const char *sctp_timer_tbl[] = { /* Lookup timer debug name. 
*/ const char *sctp_tname(const sctp_subtype_t id) { - if (id.timeout < 0) - return "illegal 'timer' event"; if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX) return sctp_timer_tbl[id.timeout]; return "unknown_timer"; diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c index 523071c7902..70a91ece3c4 100644 --- a/net/sctp/sm_statetable.c +++ b/net/sctp/sm_statetable.c @@ -960,7 +960,7 @@ static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid, if (state > SCTP_STATE_MAX) return &bug; - if (cid >= 0 && cid <= SCTP_CID_BASE_MAX) + if (cid <= SCTP_CID_BASE_MAX) return &chunk_event_table[cid][state]; if (sctp_prsctp_enable) { diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index fc12ba51c1f..87c794d8fa2 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -174,11 +174,11 @@ static struct sock *unix_peer_get(struct sock *s) { struct sock *peer; - unix_state_rlock(s); + unix_state_lock(s); peer = unix_peer(s); if (peer) sock_hold(peer); - unix_state_runlock(s); + unix_state_unlock(s); return peer; } @@ -369,7 +369,7 @@ static int unix_release_sock (struct sock *sk, int embrion) unix_remove_socket(sk); /* Clear state */ - unix_state_wlock(sk); + unix_state_lock(sk); sock_orphan(sk); sk->sk_shutdown = SHUTDOWN_MASK; dentry = u->dentry; @@ -378,7 +378,7 @@ static int unix_release_sock (struct sock *sk, int embrion) u->mnt = NULL; state = sk->sk_state; sk->sk_state = TCP_CLOSE; - unix_state_wunlock(sk); + unix_state_unlock(sk); wake_up_interruptible_all(&u->peer_wait); @@ -386,12 +386,12 @@ static int unix_release_sock (struct sock *sk, int embrion) if (skpair!=NULL) { if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) { - unix_state_wlock(skpair); + unix_state_lock(skpair); /* No more writes */ skpair->sk_shutdown = SHUTDOWN_MASK; if (!skb_queue_empty(&sk->sk_receive_queue) || embrion) skpair->sk_err = ECONNRESET; - unix_state_wunlock(skpair); + unix_state_unlock(skpair); skpair->sk_state_change(skpair); read_lock(&skpair->sk_callback_lock); sk_wake_async(skpair,1,POLL_HUP); @@ -448,7 +448,7 @@ static int unix_listen(struct socket *sock, int backlog) err = -EINVAL; if (!u->addr) goto out; /* No listens on an unbound socket */ - unix_state_wlock(sk); + unix_state_lock(sk); if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN) goto out_unlock; if (backlog > sk->sk_max_ack_backlog) @@ -462,7 +462,7 @@ static int unix_listen(struct socket *sock, int backlog) err = 0; out_unlock: - unix_state_wunlock(sk); + unix_state_unlock(sk); out: return err; } @@ -858,6 +858,31 @@ out_mknod_parent: goto out_up; } +static void unix_state_double_lock(struct sock *sk1, struct sock *sk2) +{ + if (unlikely(sk1 == sk2) || !sk2) { + unix_state_lock(sk1); + return; + } + if (sk1 < sk2) { + unix_state_lock(sk1); + unix_state_lock_nested(sk2); + } else { + unix_state_lock(sk2); + unix_state_lock_nested(sk1); + } +} + +static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2) +{ + if (unlikely(sk1 == sk2) || !sk2) { + unix_state_unlock(sk1); + return; + } + unix_state_unlock(sk1); + unix_state_unlock(sk2); +} + static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { @@ -877,11 +902,19 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, !unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0) goto out; +restart: other=unix_find_other(sunaddr, alen, sock->type, hash, &err); if (!other) goto out; - unix_state_wlock(sk); + unix_state_double_lock(sk, other); + + /* Apparently VFS overslept socket 
death. Retry. */ + if (sock_flag(other, SOCK_DEAD)) { + unix_state_double_unlock(sk, other); + sock_put(other); + goto restart; + } err = -EPERM; if (!unix_may_send(sk, other)) @@ -896,7 +929,7 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, * 1003.1g breaking connected state with AF_UNSPEC */ other = NULL; - unix_state_wlock(sk); + unix_state_double_lock(sk, other); } /* @@ -905,19 +938,19 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr, if (unix_peer(sk)) { struct sock *old_peer = unix_peer(sk); unix_peer(sk)=other; - unix_state_wunlock(sk); + unix_state_double_unlock(sk, other); if (other != old_peer) unix_dgram_disconnected(sk, old_peer); sock_put(old_peer); } else { unix_peer(sk)=other; - unix_state_wunlock(sk); + unix_state_double_unlock(sk, other); } return 0; out_unlock: - unix_state_wunlock(sk); + unix_state_double_unlock(sk, other); sock_put(other); out: return err; @@ -936,7 +969,7 @@ static long unix_wait_for_peer(struct sock *other, long timeo) (skb_queue_len(&other->sk_receive_queue) > other->sk_max_ack_backlog); - unix_state_runlock(other); + unix_state_unlock(other); if (sched) timeo = schedule_timeout(timeo); @@ -994,11 +1027,11 @@ restart: goto out; /* Latch state of peer */ - unix_state_rlock(other); + unix_state_lock(other); /* Apparently VFS overslept socket death. Retry. */ if (sock_flag(other, SOCK_DEAD)) { - unix_state_runlock(other); + unix_state_unlock(other); sock_put(other); goto restart; } @@ -1048,18 +1081,18 @@ restart: goto out_unlock; } - unix_state_wlock_nested(sk); + unix_state_lock_nested(sk); if (sk->sk_state != st) { - unix_state_wunlock(sk); - unix_state_runlock(other); + unix_state_unlock(sk); + unix_state_unlock(other); sock_put(other); goto restart; } err = security_unix_stream_connect(sock, other->sk_socket, newsk); if (err) { - unix_state_wunlock(sk); + unix_state_unlock(sk); goto out_unlock; } @@ -1096,7 +1129,7 @@ restart: smp_mb__after_atomic_inc(); /* sock_hold() does an atomic_inc() */ unix_peer(sk) = newsk; - unix_state_wunlock(sk); + unix_state_unlock(sk); /* take ten and and send info to listening sock */ spin_lock(&other->sk_receive_queue.lock); @@ -1105,14 +1138,14 @@ restart: * is installed to listening socket. 
*/ atomic_inc(&newu->inflight); spin_unlock(&other->sk_receive_queue.lock); - unix_state_runlock(other); + unix_state_unlock(other); other->sk_data_ready(other, 0); sock_put(other); return 0; out_unlock: if (other) - unix_state_runlock(other); + unix_state_unlock(other); out: if (skb) @@ -1178,10 +1211,10 @@ static int unix_accept(struct socket *sock, struct socket *newsock, int flags) wake_up_interruptible(&unix_sk(sk)->peer_wait); /* attach accepted sock to socket */ - unix_state_wlock(tsk); + unix_state_lock(tsk); newsock->state = SS_CONNECTED; sock_graft(tsk, newsock); - unix_state_wunlock(tsk); + unix_state_unlock(tsk); return 0; out: @@ -1208,7 +1241,7 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_ } u = unix_sk(sk); - unix_state_rlock(sk); + unix_state_lock(sk); if (!u->addr) { sunaddr->sun_family = AF_UNIX; sunaddr->sun_path[0] = 0; @@ -1219,7 +1252,7 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_ *uaddr_len = addr->len; memcpy(sunaddr, addr->name, *uaddr_len); } - unix_state_runlock(sk); + unix_state_unlock(sk); sock_put(sk); out: return err; @@ -1337,7 +1370,7 @@ restart: goto out_free; } - unix_state_rlock(other); + unix_state_lock(other); err = -EPERM; if (!unix_may_send(sk, other)) goto out_unlock; @@ -1347,20 +1380,20 @@ restart: * Check with 1003.1g - what should * datagram error */ - unix_state_runlock(other); + unix_state_unlock(other); sock_put(other); err = 0; - unix_state_wlock(sk); + unix_state_lock(sk); if (unix_peer(sk) == other) { unix_peer(sk)=NULL; - unix_state_wunlock(sk); + unix_state_unlock(sk); unix_dgram_disconnected(sk, other); sock_put(other); err = -ECONNREFUSED; } else { - unix_state_wunlock(sk); + unix_state_unlock(sk); } other = NULL; @@ -1397,14 +1430,14 @@ restart: } skb_queue_tail(&other->sk_receive_queue, skb); - unix_state_runlock(other); + unix_state_unlock(other); other->sk_data_ready(other, len); sock_put(other); scm_destroy(siocb->scm); return len; out_unlock: - unix_state_runlock(other); + unix_state_unlock(other); out_free: kfree_skb(skb); out: @@ -1494,14 +1527,14 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, goto out_err; } - unix_state_rlock(other); + unix_state_lock(other); if (sock_flag(other, SOCK_DEAD) || (other->sk_shutdown & RCV_SHUTDOWN)) goto pipe_err_free; skb_queue_tail(&other->sk_receive_queue, skb); - unix_state_runlock(other); + unix_state_unlock(other); other->sk_data_ready(other, size); sent+=size; } @@ -1512,7 +1545,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, return sent; pipe_err_free: - unix_state_runlock(other); + unix_state_unlock(other); kfree_skb(skb); pipe_err: if (sent==0 && !(msg->msg_flags&MSG_NOSIGNAL)) @@ -1641,7 +1674,7 @@ static long unix_stream_data_wait(struct sock * sk, long timeo) { DEFINE_WAIT(wait); - unix_state_rlock(sk); + unix_state_lock(sk); for (;;) { prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); @@ -1654,14 +1687,14 @@ static long unix_stream_data_wait(struct sock * sk, long timeo) break; set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); - unix_state_runlock(sk); + unix_state_unlock(sk); timeo = schedule_timeout(timeo); - unix_state_rlock(sk); + unix_state_lock(sk); clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); } finish_wait(sk->sk_sleep, &wait); - unix_state_runlock(sk); + unix_state_unlock(sk); return timeo; } @@ -1816,12 +1849,12 @@ static int unix_shutdown(struct socket *sock, int mode) mode = (mode+1)&(RCV_SHUTDOWN|SEND_SHUTDOWN); if 
(mode) { - unix_state_wlock(sk); + unix_state_lock(sk); sk->sk_shutdown |= mode; other=unix_peer(sk); if (other) sock_hold(other); - unix_state_wunlock(sk); + unix_state_unlock(sk); sk->sk_state_change(sk); if (other && @@ -1833,9 +1866,9 @@ static int unix_shutdown(struct socket *sock, int mode) peer_mode |= SEND_SHUTDOWN; if (mode&SEND_SHUTDOWN) peer_mode |= RCV_SHUTDOWN; - unix_state_wlock(other); + unix_state_lock(other); other->sk_shutdown |= peer_mode; - unix_state_wunlock(other); + unix_state_unlock(other); other->sk_state_change(other); read_lock(&other->sk_callback_lock); if (peer_mode == SHUTDOWN_MASK) @@ -1973,7 +2006,7 @@ static int unix_seq_show(struct seq_file *seq, void *v) else { struct sock *s = v; struct unix_sock *u = unix_sk(s); - unix_state_rlock(s); + unix_state_lock(s); seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu", s, @@ -2001,7 +2034,7 @@ static int unix_seq_show(struct seq_file *seq, void *v) for ( ; i < len; i++) seq_putc(seq, u->addr->name->sun_path[i]); } - unix_state_runlock(s); + unix_state_unlock(s); seq_putc(seq, '\n'); } diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c index 7a19e0ede28..849cc06bd91 100644 --- a/net/wanrouter/wanmain.c +++ b/net/wanrouter/wanmain.c @@ -454,7 +454,7 @@ static int wanrouter_device_setup(struct wan_device *wandev, } if (conf->data_size && conf->data) { - if (conf->data_size > 128000 || conf->data_size < 0) { + if (conf->data_size > 128000) { printk(KERN_INFO "%s: ERROR, Invalid firmware data size %i !\n", wandev->name, conf->data_size); diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index e216d49624b..e216d49624b 100644..100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl
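The af_unix changes above replace the old rlock/wlock macro pairs with a single unix_state_lock() and, for unix_dgram_connect(), add unix_state_double_lock()/unix_state_double_unlock(): when two sockets must be locked at once, the one with the lower pointer address is taken first and the second is taken with the _nested annotation, so concurrent A-to-B and B-to-A connects cannot deadlock and lockdep stays quiet. Below is a stand-alone sketch of the same address-ordered double-lock pattern, written against a hypothetical object type (struct obj and the obj_* helpers are illustrative, not from the patch):

/* Sketch of the address-ordered double-lock pattern used by
 * unix_state_double_lock()/unix_state_double_unlock() above.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>

struct obj {
        spinlock_t lock;
        /* ... */
};

static void obj_double_lock(struct obj *a, struct obj *b)
{
        if (unlikely(a == b) || !b) {
                spin_lock(&a->lock);    /* only one distinct lock to take */
                return;
        }
        if (a < b) {
                /* always take the lower address first */
                spin_lock(&a->lock);
                spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
        } else {
                spin_lock(&b->lock);
                spin_lock_nested(&a->lock, SINGLE_DEPTH_NESTING);
        }
}

static void obj_double_unlock(struct obj *a, struct obj *b)
{
        if (unlikely(a == b) || !b) {
                spin_unlock(&a->lock);
                return;
        }
        /* unlock order is irrelevant */
        spin_unlock(&a->lock);
        spin_unlock(&b->lock);
}

unix_dgram_connect() pairs this with a restart: once both locks are held it rechecks sock_flag(other, SOCK_DEAD), and if the peer was torn down while unix_find_other() slept it drops the locks, puts the peer and repeats the lookup.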