Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_common.c  |   1
-rw-r--r--  arch/powerpc/platforms/83xx/mpc837x_mds.c     |   8
-rw-r--r--  arch/powerpc/platforms/Kconfig                |   1
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype        |   7
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c           | 151
-rw-r--r--  arch/powerpc/platforms/cell/setup.c           |   7
-rw-r--r--  arch/powerpc/platforms/cell/spu_base.c        |  16
-rw-r--r--  arch/powerpc/platforms/cell/spufs/context.c   |  10
-rw-r--r--  arch/powerpc/platforms/cell/spufs/file.c      |  12
-rw-r--r--  arch/powerpc/platforms/cell/spufs/run.c       |   3
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c     |   6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/spufs.h     |   1
-rw-r--r--  arch/powerpc/platforms/cell/spufs/sputrace.c  |   7
-rw-r--r--  arch/powerpc/platforms/cell/spufs/switch.c    |   6
-rw-r--r--  arch/powerpc/platforms/celleb/beat.h          |   3
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c         |   2
16 files changed, 151 insertions, 90 deletions
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 9aa4425d80b..4d5fd1dbd40 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -199,6 +199,7 @@ int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv)
 
         return 0;
 }
+EXPORT_SYMBOL(mpc52xx_set_psc_clkdiv);
 
 /**
  * mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index 8a9c2697360..64d17b0d645 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -39,12 +39,9 @@ static int mpc837xmds_usb_cfg(void)
         if (ret)
                 return ret;
         /* Map BCSR area */
-        np = of_find_node_by_name(NULL, "bcsr");
+        np = of_find_compatible_node(NULL, NULL, "fsl,mpc837xmds-bcsr");
         if (np) {
-                struct resource res;
-
-                of_address_to_resource(np, 0, &res);
-                bcsr_regs = ioremap(res.start, res.end - res.start + 1);
+                bcsr_regs = of_iomap(np, 0);
                 of_node_put(np);
         }
         if (!bcsr_regs)
@@ -96,6 +93,7 @@ static void __init mpc837x_mds_setup_arch(void)
 static struct of_device_id mpc837x_ids[] = {
         { .type = "soc", },
         { .compatible = "soc", },
+        { .compatible = "simple-bus", },
         {},
 };
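For reference, the of_iomap() call that replaces the open-coded pair in mpc837x_mds.c above folds the same two steps into one helper. In the powerpc code of this era it is roughly the following; header locations vary by tree, so treat this as a sketch of the equivalence rather than the exact source:

#include <asm/prom.h>   /* struct device_node, of_address_to_resource() */
#include <asm/io.h>     /* ioremap() */

void __iomem *of_iomap(struct device_node *np, int index)
{
        struct resource res;

        /* translate the node's "reg" entry into a CPU physical range */
        if (of_address_to_resource(np, index, &res))
                return NULL;

        /* map it, sizing the region exactly as the removed code did */
        return ioremap(res.start, res.end - res.start + 1);
}

The helper also returns NULL when of_address_to_resource() fails, where the removed code would have passed an uninitialized resource straight to ioremap().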
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 0afd2259554..a578b966ecb 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -22,7 +22,6 @@ config PPC_83xx
         select FSL_SOC
         select MPC83xx
         select IPIC
-        select FSL_EMB_PERFMON
 
 config PPC_86xx
         bool "Freescale 86xx"
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 73d81ce14b6..0c3face0ddb 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -113,7 +113,12 @@ config FSL_BOOKE
         default y
 
 config FSL_EMB_PERFMON
-        bool
+        bool "Freescale Embedded Perfmon"
+        depends on E500 || PPC_83xx
+        help
+          This is the Performance Monitor support found on the e500 core
+          and some e300 cores (c3 and c4). Select this only if your
+          core supports the Embedded Performance Monitor APU
 
 config PTE_64BIT
         bool
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index edab631a8dc..20ea0e118f2 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -113,7 +113,7 @@
 
 /* IOMMU sizing */
 #define IO_SEGMENT_SHIFT        28
-#define IO_PAGENO_BITS          (IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)
+#define IO_PAGENO_BITS(shift)   (IO_SEGMENT_SHIFT - (shift))
 
 /* The high bit needs to be set on every DMA address */
 #define SPIDER_DMA_OFFSET       0x80000000ul
@@ -123,7 +123,6 @@ struct iommu_window {
         struct cbe_iommu *iommu;
         unsigned long offset;
         unsigned long size;
-        unsigned long pte_offset;
         unsigned int ioid;
         struct iommu_table table;
 };
@@ -200,7 +199,7 @@ static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
                 (window->ioid & IOPTE_IOID_Mask);
 #endif
 
-        io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+        io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
 
         for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
                 io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
@@ -232,7 +231,7 @@ static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
                 | (window->ioid & IOPTE_IOID_Mask);
 #endif
 
-        io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+        io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
 
         for (i = 0; i < npages; i++)
                 io_pte[i] = pte;
@@ -307,76 +306,84 @@ static int cell_iommu_find_ioc(int nid, unsigned long *base)
         return -ENODEV;
 }
 
-static void cell_iommu_setup_page_tables(struct cbe_iommu *iommu,
+static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
                 unsigned long dbase, unsigned long dsize,
                 unsigned long fbase, unsigned long fsize)
 {
         struct page *page;
-        int i;
-        unsigned long reg, segments, pages_per_segment, ptab_size, stab_size,
-                      n_pte_pages, base;
-
-        base = dbase;
-        if (fsize != 0)
-                base = min(fbase, dbase);
+        unsigned long segments, stab_size;
 
         segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
-        pages_per_segment = 1ull << IO_PAGENO_BITS;
 
-        pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
-                        __FUNCTION__, iommu->nid, segments, pages_per_segment);
+        pr_debug("%s: iommu[%d]: segments: %lu\n",
+                        __FUNCTION__, iommu->nid, segments);
 
         /* set up the segment table */
         stab_size = segments * sizeof(unsigned long);
         page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
         BUG_ON(!page);
         iommu->stab = page_address(page);
-        clear_page(iommu->stab);
+        memset(iommu->stab, 0, stab_size);
+}
+
+static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
+                unsigned long base, unsigned long size, unsigned long gap_base,
+                unsigned long gap_size, unsigned long page_shift)
+{
+        struct page *page;
+        int i;
+        unsigned long reg, segments, pages_per_segment, ptab_size,
+                      n_pte_pages, start_seg, *ptab;
+
+        start_seg = base >> IO_SEGMENT_SHIFT;
+        segments  = size >> IO_SEGMENT_SHIFT;
+        pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
+        /* PTEs for each segment must start on a 4K bounday */
+        pages_per_segment = max(pages_per_segment,
+                                (1 << 12) / sizeof(unsigned long));
 
-        /* ... and the page tables. Since these are contiguous, we can treat
-         * the page tables as one array of ptes, like pSeries does.
-         */
         ptab_size = segments * pages_per_segment * sizeof(unsigned long);
         pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
                         iommu->nid, ptab_size, get_order(ptab_size));
         page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
         BUG_ON(!page);
-        iommu->ptab = page_address(page);
-        memset(iommu->ptab, 0, ptab_size);
+        ptab = page_address(page);
+        memset(ptab, 0, ptab_size);
 
-        /* allocate a bogus page for the end of each mapping */
-        page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
-        BUG_ON(!page);
-        iommu->pad_page = page_address(page);
-        clear_page(iommu->pad_page);
-
-        /* number of pages needed for a page table */
-        n_pte_pages = (pages_per_segment *
-                       sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;
+        /* number of 4K pages needed for a page table */
+        n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;
 
         pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
-                        __FUNCTION__, iommu->nid, iommu->stab, iommu->ptab,
+                        __FUNCTION__, iommu->nid, iommu->stab, ptab,
                         n_pte_pages);
 
         /* initialise the STEs */
         reg = IOSTE_V | ((n_pte_pages - 1) << 5);
 
-        if (IOMMU_PAGE_SIZE == 0x1000)
-                reg |= IOSTE_PS_4K;
-        else if (IOMMU_PAGE_SIZE == 0x10000)
-                reg |= IOSTE_PS_64K;
-        else {
-                extern void __unknown_page_size_error(void);
-                __unknown_page_size_error();
+        switch (page_shift) {
+        case 12: reg |= IOSTE_PS_4K;  break;
+        case 16: reg |= IOSTE_PS_64K; break;
+        case 20: reg |= IOSTE_PS_1M;  break;
+        case 24: reg |= IOSTE_PS_16M; break;
+        default: BUG();
         }
 
+        gap_base = gap_base >> IO_SEGMENT_SHIFT;
+        gap_size = gap_size >> IO_SEGMENT_SHIFT;
+
         pr_debug("Setting up IOMMU stab:\n");
-        for (i = base >> IO_SEGMENT_SHIFT; i < segments; i++) {
-                iommu->stab[i] = reg |
-                        (__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
+        for (i = start_seg; i < (start_seg + segments); i++) {
+                if (i >= gap_base && i < (gap_base + gap_size)) {
+                        pr_debug("\toverlap at %d, skipping\n", i);
+                        continue;
+                }
+                iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
+                                        (i - start_seg));
                 pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
         }
+
+        return ptab;
 }
 
 static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
@@ -423,7 +430,9 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
 static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
         unsigned long base, unsigned long size)
 {
-        cell_iommu_setup_page_tables(iommu, base, size, 0, 0);
+        cell_iommu_setup_stab(iommu, base, size, 0, 0);
+        iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
+                                            IOMMU_PAGE_SHIFT);
 
         cell_iommu_enable_hardware(iommu);
 }
@@ -464,6 +473,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
                 unsigned long pte_offset)
 {
         struct iommu_window *window;
+        struct page *page;
         u32 ioid;
 
         ioid = cell_iommu_get_ioid(np);
@@ -475,13 +485,11 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
         window->size = size;
         window->ioid = ioid;
         window->iommu = iommu;
-        window->pte_offset = pte_offset;
 
         window->table.it_blocksize = 16;
         window->table.it_base = (unsigned long)iommu->ptab;
         window->table.it_index = iommu->nid;
-        window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) +
-                window->pte_offset;
+        window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
         window->table.it_size = size >> IOMMU_PAGE_SHIFT;
 
         iommu_init_table(&window->table, iommu->nid);
@@ -504,6 +512,11 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
          * This code also assumes that we have a window that starts at 0,
          * which is the case on all spider based blades.
          */
+        page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
+        BUG_ON(!page);
+        iommu->pad_page = page_address(page);
+        clear_page(iommu->pad_page);
+
         __set_bit(0, window->table.it_map);
         tce_build_cell(&window->table, window->table.it_offset, 1,
                        (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
@@ -549,7 +562,7 @@ static void cell_dma_dev_setup_iommu(struct device *dev)
         archdata->dma_data = &window->table;
 }
 
-static void cell_dma_dev_setup_static(struct device *dev);
+static void cell_dma_dev_setup_fixed(struct device *dev);
 
 static void cell_dma_dev_setup(struct device *dev)
 {
@@ -557,7 +570,7 @@ static void cell_dma_dev_setup(struct device *dev)
 
         /* Order is important here, these are not mutually exclusive */
         if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
-                cell_dma_dev_setup_static(dev);
+                cell_dma_dev_setup_fixed(dev);
         else if (get_pci_dma_ops() == &dma_iommu_ops)
                 cell_dma_dev_setup_iommu(dev);
         else if (get_pci_dma_ops() == &dma_direct_ops)
@@ -858,7 +871,7 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
         return 0;
 }
 
-static void cell_dma_dev_setup_static(struct device *dev)
+static void cell_dma_dev_setup_fixed(struct device *dev)
 {
         struct dev_archdata *archdata = &dev->archdata;
         u64 addr;
@@ -869,35 +882,45 @@ static void cell_dma_dev_setup_static(struct device *dev)
 
         dev_dbg(dev, "iommu: fixed addr = %lx\n", addr);
 }
 
+static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
+        unsigned long base_pte)
+{
+        unsigned long segment, offset;
+
+        segment = addr >> IO_SEGMENT_SHIFT;
+        offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
+        ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));
+
+        pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
+                  addr, ptab, segment, offset);
+
+        ptab[offset] = base_pte | (__pa(addr) & IOPTE_RPN_Mask);
+}
+
 static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
         struct device_node *np, unsigned long dbase, unsigned long dsize,
         unsigned long fbase, unsigned long fsize)
 {
-        unsigned long base_pte, uaddr, *io_pte;
-        int i;
+        unsigned long base_pte, uaddr, ioaddr, *ptab;
 
-        dma_iommu_fixed_base = fbase;
+        ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);
 
-        /* convert from bytes into page table indices */
-        dbase = dbase >> IOMMU_PAGE_SHIFT;
-        dsize = dsize >> IOMMU_PAGE_SHIFT;
-        fbase = fbase >> IOMMU_PAGE_SHIFT;
-        fsize = fsize >> IOMMU_PAGE_SHIFT;
+        dma_iommu_fixed_base = fbase;
 
         pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
 
-        io_pte = iommu->ptab;
         base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
                     | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);
 
-        uaddr = 0;
-        for (i = fbase; i < fbase + fsize; i++, uaddr += IOMMU_PAGE_SIZE) {
+        for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
                 /* Don't touch the dynamic region */
-                if (i >= dbase && i < (dbase + dsize)) {
-                        pr_debug("iommu: static/dynamic overlap, skipping\n");
+                ioaddr = uaddr + fbase;
+                if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
+                        pr_debug("iommu: fixed/dynamic overlap, skipping\n");
                         continue;
                 }
-                io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+
+                insert_16M_pte(uaddr, ptab, base_pte);
         }
 
         mb();
@@ -995,7 +1018,9 @@ static int __init cell_iommu_fixed_mapping_init(void)
                         "fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
                          dbase + dsize, fbase, fbase + fsize);
 
-                cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize);
+                cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
+                iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
+                                                    IOMMU_PAGE_SHIFT);
                 cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
                                              fbase, fsize);
                 cell_iommu_enable_hardware(iommu);
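The new IO_PAGENO_BITS(shift) macro and insert_16M_pte() above split an I/O address into a 256MB segment number and a PTE index within that segment. A standalone sketch of that arithmetic, using the patch's constants and an invented example address:

#include <stdio.h>

#define IO_SEGMENT_SHIFT        28
#define IO_PAGENO_BITS(shift)   (IO_SEGMENT_SHIFT - (shift))

int main(void)
{
        unsigned long addr = 0x8001234000ul;    /* invented example ioaddr */
        unsigned long page_shift = 24;          /* 16M IOMMU pages */

        unsigned long segment = addr >> IO_SEGMENT_SHIFT;
        /* PTE index within the segment, as insert_16M_pte() computes it */
        unsigned long offset = (addr >> page_shift) -
                               (segment << IO_PAGENO_BITS(page_shift));

        printf("addr 0x%lx -> segment 0x%lx, pte offset %lu\n",
               addr, segment, offset);
        printf("ptes per segment at this page size: %lu\n",
               1ul << IO_PAGENO_BITS(page_shift));
        return 0;
}

At page_shift = 24 a segment holds only 1 << 4 = 16 PTEs, just 128 bytes of table, which is why cell_iommu_alloc_ptab() rounds pages_per_segment up so each segment's PTEs still start on a 4K boundary.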
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index a7f609b3b87..dda34650cb0 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -149,6 +149,11 @@ static void __init cell_init_irq(void)
         mpic_init_IRQ();
 }
 
+static void __init cell_set_dabrx(void)
+{
+        mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
+}
+
 static void __init cell_setup_arch(void)
 {
 #ifdef CONFIG_SPU_BASE
@@ -158,6 +163,8 @@ static void __init cell_setup_arch(void)
 
         cbe_regs_init();
 
+        cell_set_dabrx();
+
 #ifdef CONFIG_CBE_RAS
         cbe_ras_init();
 #endif
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 87eb07f94c5..712001f6b7d 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -81,9 +81,12 @@ struct spu_slb {
 void spu_invalidate_slbs(struct spu *spu)
 {
         struct spu_priv2 __iomem *priv2 = spu->priv2;
+        unsigned long flags;
 
+        spin_lock_irqsave(&spu->register_lock, flags);
         if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
                 out_be64(&priv2->slb_invalidate_all_W, 0UL);
+        spin_unlock_irqrestore(&spu->register_lock, flags);
 }
 EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
 
@@ -148,7 +151,11 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
                         __func__, slbe, slb->vsid, slb->esid);
 
         out_be64(&priv2->slb_index_W, slbe);
+        /* set invalid before writing vsid */
+        out_be64(&priv2->slb_esid_RW, 0);
+        /* now it's safe to write the vsid */
         out_be64(&priv2->slb_vsid_RW, slb->vsid);
+        /* setting the new esid makes the entry valid again */
         out_be64(&priv2->slb_esid_RW, slb->esid);
 }
 
@@ -290,9 +297,11 @@ void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
                 nr_slbs++;
         }
 
+        spin_lock_irq(&spu->register_lock);
         /* Add the set of SLBs */
         for (i = 0; i < nr_slbs; i++)
                 spu_load_slb(spu, i, &slbs[i]);
+        spin_unlock_irq(&spu->register_lock);
 }
 EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
 
@@ -337,13 +346,14 @@ spu_irq_class_1(int irq, void *data)
         if (stat & CLASS1_STORAGE_FAULT_INTR)
                 spu_mfc_dsisr_set(spu, 0ul);
         spu_int_stat_clear(spu, 1, stat);
-        spin_unlock(&spu->register_lock);
-        pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
-                        dar, dsisr);
 
         if (stat & CLASS1_SEGMENT_FAULT_INTR)
                 __spu_trap_data_seg(spu, dar);
 
+        spin_unlock(&spu->register_lock);
+        pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+                        dar, dsisr);
+
         if (stat & CLASS1_STORAGE_FAULT_INTR)
                 __spu_trap_data_map(spu, dar, dsisr);
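The comments added to spu_load_slb() spell out the required ordering: invalidate the entry by clearing the esid before rewriting the vsid, and only then write the new esid to revalidate it, so a valid entry with a stale half is never visible. A user-space analogue of the same rule, with C11 atomics standing in for the MMIO ordering and invented names:

#include <stdatomic.h>
#include <stdio.h>

#define SLB_VALID 0x1ul

struct slb_entry {
        _Atomic unsigned long esid;     /* valid bit lives in the esid */
        unsigned long vsid;
};

static void slb_load(struct slb_entry *e, unsigned long esid,
                     unsigned long vsid)
{
        /* set invalid before writing vsid */
        atomic_store(&e->esid, 0);
        /* now it's safe to write the vsid */
        e->vsid = vsid;
        /* setting the new esid makes the entry valid again */
        atomic_store(&e->esid, esid | SLB_VALID);
}

int main(void)
{
        struct slb_entry e = { 0, 0 };

        slb_load(&e, 0x100, 0x200);
        printf("esid 0x%lx vsid 0x%lx\n",
               (unsigned long)atomic_load(&e.esid), e.vsid);
        return 0;
}

The spu_base.c hunks additionally take register_lock around the invalidate and load paths, so concurrent spu_invalidate_slbs() calls cannot interleave with a half-done update.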
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 133995ed5cc..0ad83aeb70b 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -109,13 +109,12 @@ void spu_forget(struct spu_context *ctx)
 
         /*
          * This is basically an open-coded spu_acquire_saved, except that
-         * we don't acquire the state mutex interruptible.
+         * we don't acquire the state mutex interruptible, and we don't
+         * want this context to be rescheduled on release.
          */
         mutex_lock(&ctx->state_mutex);
-        if (ctx->state != SPU_STATE_SAVED) {
-                set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
+        if (ctx->state != SPU_STATE_SAVED)
                 spu_deactivate(ctx);
-        }
 
         mm = ctx->owner;
         ctx->owner = NULL;
@@ -171,7 +170,8 @@ void spu_release_saved(struct spu_context *ctx)
 {
         BUG_ON(ctx->state != SPU_STATE_SAVED);
 
-        if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags))
+        if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags) &&
+                        test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
                 spu_activate(ctx, 0);
 
         spu_release(ctx);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index c66c3756970..f7a7e8635fb 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -367,6 +367,13 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
                 return NOPFN_SIGBUS;
 
         /*
+         * Because we release the mmap_sem, the context may be destroyed while
+         * we're in spu_wait. Grab an extra reference so it isn't destroyed
+         * in the meantime.
+         */
+        get_spu_context(ctx);
+
+        /*
          * We have to wait for context to be loaded before we have
          * pages to hand out to the user, but we don't want to wait
          * with the mmap_sem held.
@@ -375,7 +382,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
          * hanged.
          */
         if (spu_acquire(ctx))
-                return NOPFN_REFAULT;
+                goto refault;
 
         if (ctx->state == SPU_STATE_SAVED) {
                 up_read(&current->mm->mmap_sem);
@@ -391,6 +398,9 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 
         if (!ret)
                 spu_release(ctx);
+
+refault:
+        put_spu_context(ctx);
         return NOPFN_REFAULT;
 }
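The spufs_ps_nopfn() fix is a pin-across-sleep pattern: take a reference before a wait that runs after mmap_sem is dropped, and release it on every exit path. A minimal user-space sketch of the pattern, with an invented refcount type (get_spu_context()/put_spu_context() play this role in the patch):

#include <stdio.h>
#include <stdlib.h>

struct ctx {
        int refcount;
};

static struct ctx *get_ctx(struct ctx *c)
{
        c->refcount++;
        return c;
}

static void put_ctx(struct ctx *c)
{
        if (--c->refcount == 0) {
                puts("last reference dropped, freeing");
                free(c);
        }
}

/* stand-in for spu_acquire()/spu_wait(): may sleep without the lock */
static int blocking_wait(struct ctx *c)
{
        (void)c;
        return 0;       /* 0 = acquired, nonzero = interrupted */
}

int main(void)
{
        struct ctx *c = calloc(1, sizeof(*c));

        c->refcount = 1;        /* owner's reference */

        get_ctx(c);             /* pin across the wait */
        if (blocking_wait(c))
                goto refault;
        /* ... fault handling would run here, then release ... */
refault:
        put_ctx(c);             /* matches get_ctx() */
        put_ctx(c);             /* owner drops its reference */
        return 0;
}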
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 6221968c2a3..cac69e11677 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -220,6 +220,7 @@ static int spu_run_init(struct spu_context *ctx, u32 *npc)
                 }
         }
 
+        set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
         return 0;
 }
 
@@ -234,7 +235,7 @@ static int spu_run_fini(struct spu_context *ctx, u32 *npc,
         *npc = ctx->ops->npc_read(ctx);
 
         spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
-        ctx->policy = SCHED_IDLE;
+        clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
         spu_release(ctx);
 
         if (signal_pending(current))
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 3a5972117de..00528ef84ad 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -246,7 +246,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
         spu_switch_notify(spu, ctx);
         ctx->state = SPU_STATE_RUNNABLE;
 
-        spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
+        spuctx_switch_state(ctx, SPU_UTIL_USER);
 }
 
 /*
@@ -867,7 +867,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
         if (ctx->policy == SCHED_FIFO)
                 goto out;
 
-        if (--ctx->time_slice && ctx->policy != SCHED_IDLE)
+        if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
                 goto out;
 
         spu = ctx->spu;
@@ -877,7 +877,7 @@ static noinline void spusched_tick(struct spu_context *ctx)
         new = grab_runnable_context(ctx->prio + 1, spu->node);
         if (new) {
                 spu_unschedule(spu, ctx);
-                if (ctx->policy != SCHED_IDLE)
+                if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
                         spu_add_to_rq(ctx);
         } else {
                 spu_context_nospu_trace(spusched_tick__newslice, ctx);
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 2c2fe3c07d7..cdc515182f8 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -44,6 +44,7 @@ struct spu_gang;
 enum {
         SPU_SCHED_NOTIFY_ACTIVE,
         SPU_SCHED_WAS_ACTIVE,   /* was active upon spu_acquire_saved()  */
+        SPU_SCHED_SPU_RUN,      /* context is within spu_run */
 };
 
 struct spu_context {
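Across run.c, sched.c and spufs.h, the overloaded ctx->policy == SCHED_IDLE test is replaced by an explicit SPU_SCHED_SPU_RUN bit, set in spu_run_init() and cleared in spu_run_fini(). A toy model of the resulting scheduler decision, with simplified bit helpers in place of the kernel's atomic bitops:

#include <stdio.h>

enum {
        SPU_SCHED_NOTIFY_ACTIVE,
        SPU_SCHED_WAS_ACTIVE,
        SPU_SCHED_SPU_RUN,      /* context is within spu_run */
};

/* toy stand-ins for the kernel's atomic set_bit/clear_bit/test_bit */
#define set_bit(b, f)   (*(f) |= (1ul << (b)))
#define clear_bit(b, f) (*(f) &= ~(1ul << (b)))
#define test_bit(b, f)  (!!(*(f) & (1ul << (b))))

struct toy_context {
        unsigned long sched_flags;
};

/* mirrors the spusched_tick() test: only a context inside spu_run
 * is put back on the runqueue when it is preempted */
static int wants_requeue(struct toy_context *ctx)
{
        return test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags);
}

int main(void)
{
        struct toy_context ctx = { 0 };

        set_bit(SPU_SCHED_SPU_RUN, &ctx.sched_flags);   /* spu_run_init() */
        printf("requeue while in spu_run: %d\n", wants_requeue(&ctx));

        clear_bit(SPU_SCHED_SPU_RUN, &ctx.sched_flags); /* spu_run_fini() */
        printf("requeue after spu_run:   %d\n", wants_requeue(&ctx));
        return 0;
}

Only contexts inside spu_run are requeued after preemption; everything else simply gives up the SPU.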
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c
index 01974f7776e..79aa773f3c9 100644
--- a/arch/powerpc/platforms/cell/spufs/sputrace.c
+++ b/arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -58,12 +58,12 @@ static int sputrace_sprint(char *tbuf, int n)
                 ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start));
 
         return snprintf(tbuf, n,
-                "[%lu.%09lu] %d: %s (thread = %d, spu = %d)\n",
+                "[%lu.%09lu] %d: %s (ctxthread = %d, spu = %d)\n",
                 (unsigned long) tv.tv_sec,
                 (unsigned long) tv.tv_nsec,
-                t->owner_tid,
-                t->name,
                 t->curr_tid,
+                t->name,
+                t->owner_tid,
                 t->number);
 }
 
@@ -188,6 +188,7 @@ struct spu_probe spu_probes[] = {
         { "spufs_ps_nopfn__insert", "%p %p", spu_context_event },
         { "spu_acquire_saved__enter", "%p", spu_context_nospu_event },
         { "destroy_spu_context__enter", "%p", spu_context_nospu_event },
+        { "spufs_stop_callback__enter", "%p %p", spu_context_event },
 };
 
 static int __init sputrace_init(void)
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 6f5886c7b1f..e9dc7a55d1b 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -34,6 +34,7 @@
 
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -117,6 +118,8 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
          *     Write INT_MASK_class1 with value of 0.
          *     Save INT_Mask_class2 in CSA.
          *     Write INT_MASK_class2 with value of 0.
+         *     Synchronize all three interrupts to be sure
+         *     we no longer execute a handler on another CPU.
          */
         spin_lock_irq(&spu->register_lock);
         if (csa) {
@@ -129,6 +132,9 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
         spu_int_mask_set(spu, 2, 0ul);
         eieio();
         spin_unlock_irq(&spu->register_lock);
+        synchronize_irq(spu->irqs[0]);
+        synchronize_irq(spu->irqs[1]);
+        synchronize_irq(spu->irqs[2]);
 }
 
 static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
diff --git a/arch/powerpc/platforms/celleb/beat.h b/arch/powerpc/platforms/celleb/beat.h
index b2e292df13c..ac82ac35b99 100644
--- a/arch/powerpc/platforms/celleb/beat.h
+++ b/arch/powerpc/platforms/celleb/beat.h
@@ -21,9 +21,6 @@
 #ifndef _CELLEB_BEAT_H
 #define _CELLEB_BEAT_H
 
-#define DABRX_KERNEL            (1UL<<1)
-#define DABRX_USER              (1UL<<0)
-
 int64_t beat_get_term_char(uint64_t,uint64_t*,uint64_t*,uint64_t*);
 int64_t beat_put_term_char(uint64_t,uint64_t,uint64_t,uint64_t);
 int64_t beat_repository_encode(int, const char *, uint64_t[4]);
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 40736400ef8..829b8b02527 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -618,9 +618,9 @@ static int pmacpic_find_viaint(void)
         if (np == NULL)
                 goto not_found;
         viaint = irq_of_parse_and_map(np, 0);;
-#endif /* CONFIG_ADB_PMU */
 
 not_found:
+#endif /* CONFIG_ADB_PMU */
         return viaint;
 }
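The disable_interrupts() change in switch.c masks all three interrupt classes and then waits, via synchronize_irq(), for any handler still running on another CPU to finish before the context save proceeds. A rough pthread analogue of that mask-then-wait idea (invented names; the kernel primitive waits per-IRQ rather than on a global lock):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t handler_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic int masked;

static void *irq_handler(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&handler_lock);      /* handler in progress */
        if (!atomic_load(&masked))
                puts("handler: servicing interrupt");
        pthread_mutex_unlock(&handler_lock);
        return NULL;
}

/* taking and releasing the lock cannot complete until any handler
 * that was already running has exited: a toy synchronize_irq() */
static void synchronize_handler(void)
{
        pthread_mutex_lock(&handler_lock);
        pthread_mutex_unlock(&handler_lock);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, irq_handler, NULL);

        atomic_store(&masked, 1);       /* "write INT_MASK with 0" */
        synchronize_handler();          /* "synchronize_irq(spu->irqs[n])" */
        puts("safe to save the context now");

        pthread_join(t, NULL);
        return 0;
}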