Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--	arch/sparc/kernel/ioport.c	193
-rw-r--r--	arch/sparc/kernel/kprobes.c	65
-rw-r--r--	arch/sparc/kernel/time_64.c	2
3 files changed, 38 insertions, 222 deletions
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index cca9134cfa7d..6799c93c9f27 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -38,6 +38,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/scatterlist.h>
+#include <linux/dma-noncoherent.h>
#include <linux/of_device.h>
#include <asm/io.h>
@@ -434,42 +435,41 @@ arch_initcall(sparc_register_ioport);
/* Allocate and map kernel buffer using consistent mode DMA for a device.
* hwdev should be valid struct pci_dev pointer for PCI devices.
*/
-static void *pci32_alloc_coherent(struct device *dev, size_t len,
- dma_addr_t *pba, gfp_t gfp,
- unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
{
- unsigned long len_total = PAGE_ALIGN(len);
+ unsigned long len_total = PAGE_ALIGN(size);
void *va;
struct resource *res;
int order;
- if (len == 0) {
+ if (size == 0) {
return NULL;
}
- if (len > 256*1024) { /* __get_free_pages() limit */
+ if (size > 256*1024) { /* __get_free_pages() limit */
return NULL;
}
order = get_order(len_total);
va = (void *) __get_free_pages(gfp, order);
if (va == NULL) {
- printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT);
+ printk("%s: no %ld pages\n", __func__, len_total>>PAGE_SHIFT);
goto err_nopages;
}
if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
- printk("pci_alloc_consistent: no core\n");
+ printk("%s: no core\n", __func__);
goto err_nomem;
}
if (allocate_resource(&_sparc_dvma, res, len_total,
_sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) {
- printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total);
+ printk("%s: cannot occupy 0x%lx", __func__, len_total);
goto err_nova;
}
srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total);
- *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */
+ *dma_handle = virt_to_phys(va);
return (void *) res->start;
err_nova:
@@ -481,184 +481,53 @@ err_nopages:
}
/* Free and unmap a consistent DMA buffer.
- * cpu_addr is what was returned from pci_alloc_consistent,
- * size must be the same as what as passed into pci_alloc_consistent,
- * and likewise dma_addr must be the same as what *dma_addrp was set to.
+ * cpu_addr is what was returned from arch_dma_alloc, size must be the same
+ * as what was passed into arch_dma_alloc, and likewise dma_addr must be the
+ * same as what *dma_handle was set to.
*
* References to the memory and mappings associated with cpu_addr/dma_addr
* past this call are illegal.
*/
-static void pci32_free_coherent(struct device *dev, size_t n, void *p,
- dma_addr_t ba, unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr, unsigned long attrs)
{
struct resource *res;
if ((res = lookup_resource(&_sparc_dvma,
- (unsigned long)p)) == NULL) {
- printk("pci_free_consistent: cannot free %p\n", p);
+ (unsigned long)cpu_addr)) == NULL) {
+ printk("%s: cannot free %p\n", __func__, cpu_addr);
return;
}
- if (((unsigned long)p & (PAGE_SIZE-1)) != 0) {
- printk("pci_free_consistent: unaligned va %p\n", p);
+ if (((unsigned long)cpu_addr & (PAGE_SIZE-1)) != 0) {
+ printk("%s: unaligned va %p\n", __func__, cpu_addr);
return;
}
- n = PAGE_ALIGN(n);
- if (resource_size(res) != n) {
- printk("pci_free_consistent: region 0x%lx asked 0x%lx\n",
- (long)resource_size(res), (long)n);
+ size = PAGE_ALIGN(size);
+ if (resource_size(res) != size) {
+ printk("%s: region 0x%lx asked 0x%zx\n", __func__,
+ (long)resource_size(res), size);
return;
}
- dma_make_coherent(ba, n);
- srmmu_unmapiorange((unsigned long)p, n);
+ dma_make_coherent(dma_addr, size);
+ srmmu_unmapiorange((unsigned long)cpu_addr, size);
release_resource(res);
kfree(res);
- free_pages((unsigned long)phys_to_virt(ba), get_order(n));
-}
-
-/*
- * Same as pci_map_single, but with pages.
- */
-static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- unsigned long attrs)
-{
- /* IIep is write-through, not flushing. */
- return page_to_phys(page) + offset;
-}
-
-static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
- enum dma_data_direction dir, unsigned long attrs)
-{
- if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
- dma_make_coherent(ba, PAGE_ALIGN(size));
-}
-
-/* Map a set of buffers described by scatterlist in streaming
- * mode for DMA. This is the scatter-gather version of the
- * above pci_map_single interface. Here the scatter gather list
- * elements are each tagged with the appropriate dma address
- * and length. They are obtained via sg_dma_{address,length}(SG).
- *
- * NOTE: An implementation may be able to use a smaller number of
- * DMA address/length pairs than there are SG table elements.
- * (for example via virtual mapping capabilities)
- * The routine returns the number of addr/length pairs actually
- * used, at most nents.
- *
- * Device ownership issues as mentioned above for pci_map_single are
- * the same here.
- */
-static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int n;
-
- /* IIep is write-through, not flushing. */
- for_each_sg(sgl, sg, nents, n) {
- sg->dma_address = sg_phys(sg);
- sg->dma_length = sg->length;
- }
- return nents;
-}
-
-/* Unmap a set of streaming mode DMA translations.
- * Again, cpu read rules concerning calls here are the same as for
- * pci_unmap_single() above.
- */
-static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir,
- unsigned long attrs)
-{
- struct scatterlist *sg;
- int n;
-
- if (dir != PCI_DMA_TODEVICE && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
- for_each_sg(sgl, sg, nents, n) {
- dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
- }
- }
-}
-
-/* Make physical memory consistent for a single
- * streaming mode DMA translation before or after a transfer.
- *
- * If you perform a pci_map_single() but wish to interrogate the
- * buffer using the cpu, yet do not wish to teardown the PCI dma
- * mapping, you must call this function before doing so. At the
- * next point you give the PCI dma address back to the card, you
- * must first perform a pci_dma_sync_for_device, and then the
- * device again owns the buffer.
- */
-static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba,
- size_t size, enum dma_data_direction dir)
-{
- if (dir != PCI_DMA_TODEVICE) {
- dma_make_coherent(ba, PAGE_ALIGN(size));
- }
-}
-
-static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba,
- size_t size, enum dma_data_direction dir)
-{
- if (dir != PCI_DMA_TODEVICE) {
- dma_make_coherent(ba, PAGE_ALIGN(size));
- }
+ free_pages((unsigned long)phys_to_virt(dma_addr), get_order(size));
}
-/* Make physical memory consistent for a set of streaming
- * mode DMA translations after a transfer.
- *
- * The same as pci_dma_sync_single_* but for a scatter-gather list,
- * same rules and usage.
- */
-static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
-{
- struct scatterlist *sg;
- int n;
-
- if (dir != PCI_DMA_TODEVICE) {
- for_each_sg(sgl, sg, nents, n) {
- dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
- }
- }
-}
+/* IIep is write-through, not flushing on cpu-to-device transfers. */
-static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *sgl,
- int nents, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+ size_t size, enum dma_data_direction dir)
{
- struct scatterlist *sg;
- int n;
-
- if (dir != PCI_DMA_TODEVICE) {
- for_each_sg(sgl, sg, nents, n) {
- dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length));
- }
- }
+ if (dir != PCI_DMA_TODEVICE)
+ dma_make_coherent(paddr, PAGE_ALIGN(size));
}
-/* note: leon re-uses pci32_dma_ops */
-const struct dma_map_ops pci32_dma_ops = {
- .alloc = pci32_alloc_coherent,
- .free = pci32_free_coherent,
- .map_page = pci32_map_page,
- .unmap_page = pci32_unmap_page,
- .map_sg = pci32_map_sg,
- .unmap_sg = pci32_unmap_sg,
- .sync_single_for_cpu = pci32_sync_single_for_cpu,
- .sync_single_for_device = pci32_sync_single_for_device,
- .sync_sg_for_cpu = pci32_sync_sg_for_cpu,
- .sync_sg_for_device = pci32_sync_sg_for_device,
-};
-EXPORT_SYMBOL(pci32_dma_ops);
-
const struct dma_map_ops *dma_ops = &sbus_dma_ops;
EXPORT_SYMBOL(dma_ops);
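
With pci32_dma_ops removed, sparc32 drivers reach these hooks only through the
generic DMA API: dma_alloc_coherent()/dma_free_coherent() land in
arch_dma_alloc()/arch_dma_free(), and the streaming sync path lands in
arch_sync_dma_for_cpu(). A minimal sketch for context, not part of the patch;
the function name, device pointer, and PAGE_SIZE buffer are illustrative
assumptions.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static int example_dev_setup(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* The dma-noncoherent core routes this to arch_dma_alloc(). */
	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... program the device with dma_handle, touch cpu_addr on the CPU ... */

	/* Routed to arch_dma_free(); size and handle must match the allocation. */
	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
	return 0;
}
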
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index ab4ba4347941..dfbca2470536 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -147,18 +147,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
kcb->kprobe_status = KPROBE_REENTER;
prepare_singlestep(p, regs, kcb);
return 1;
- } else {
- if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
+ } else if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
/* The breakpoint instruction was removed by
* another cpu right after we hit, no further
* handling of this interrupt is appropriate
*/
- ret = 1;
- goto no_kprobe;
- }
- p = __this_cpu_read(current_kprobe);
- if (p->break_handler && p->break_handler(p, regs))
- goto ss_probe;
+ ret = 1;
}
goto no_kprobe;
}
@@ -181,10 +175,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
- if (p->pre_handler && p->pre_handler(p, regs))
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
return 1;
+ }
-ss_probe:
prepare_singlestep(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
@@ -441,53 +437,6 @@ out:
exception_exit(prev_state);
}
-/* Jprobes support. */
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct jprobe *jp = container_of(p, struct jprobe, kp);
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));
-
- regs->tpc = (unsigned long) jp->entry;
- regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
- regs->tstate |= TSTATE_PIL;
-
- return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
- register unsigned long orig_fp asm("g1");
-
- orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
- __asm__ __volatile__("\n"
-"1: cmp %%sp, %0\n\t"
- "blu,a,pt %%xcc, 1b\n\t"
- " restore\n\t"
- ".globl jprobe_return_trap_instruction\n"
-"jprobe_return_trap_instruction:\n\t"
- "ta 0x70"
- : /* no outputs */
- : "r" (orig_fp));
-}
-
-extern void jprobe_return_trap_instruction(void);
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- u32 *addr = (u32 *) regs->tpc;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
- if (addr == (u32 *) jprobe_return_trap_instruction) {
- memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
- preempt_enable_no_resched();
- return 1;
- }
- return 0;
-}
-
/* The value stored in the return address register is actually 2
* instructions before where the callee will return to.
* Sequences usually look something like this
@@ -562,9 +511,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
regs->tpc = orig_ret_address;
regs->tnpc = orig_ret_address + 4;
- reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
- preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
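
For context, a minimal sketch (not part of the patch) of the kprobe usage this
rework leaves in place: with jprobes and the break_handler path gone, only the
regular pre_handler remains, and a pre_handler that returns non-zero is now
cleaned up by kprobe_handler() itself via reset_current_kprobe() and
preempt_enable_no_resched(), as added above. The probed symbol and names below
are assumptions, not taken from the patch.

#include <linux/kprobes.h>
#include <linux/module.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	/*
	 * Returning 0 lets the core single-step the probed instruction;
	 * returning non-zero means the handler consumed the event, and the
	 * arch code now resets the kprobe state itself.
	 */
	pr_info("hit %s\n", p->symbol_name);
	return 0;
}

static struct kprobe example_kp = {
	.symbol_name	= "_do_fork",	/* hypothetical probe target */
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
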
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 2ef8cfa9677e..f0eba72aa1ad 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -814,7 +814,7 @@ static void __init get_tick_patch(void)
}
}
-static void init_tick_ops(struct sparc64_tick_ops *ops)
+static void __init init_tick_ops(struct sparc64_tick_ops *ops)
{
unsigned long freq, quotient, tick;
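
The hunk context above shows that get_tick_patch() is already __init, so its
caller init_tick_ops() must be __init as well: code in .init.text is discarded
after boot and may only be referenced from other boot-time code, and the
annotation avoids a modpost section-mismatch warning. A brief sketch of the
rule, with hypothetical names, not taken from the patch:

#include <linux/init.h>

static void __init example_patch(void)		/* plays the role of get_tick_patch() */
{
	/* boot-time patching would go here */
}

static void __init example_init_ops(void)	/* plays the role of init_tick_ops() */
{
	example_patch();	/* __init calling __init: no section mismatch */
}

static int __init example_boot(void)
{
	example_init_ops();
	return 0;
}
early_initcall(example_boot);
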