Diffstat (limited to 'arch/ia64')
-rw-r--r-- | arch/ia64/Kconfig                     |  2
-rw-r--r-- | arch/ia64/include/asm/dma-mapping.h   | 11
-rw-r--r-- | arch/ia64/include/asm/pci.h           |  4
-rw-r--r-- | arch/ia64/include/asm/rwsem.h         | 25
-rw-r--r-- | arch/ia64/include/asm/sn/bte.h        |  4
-rw-r--r-- | arch/ia64/include/asm/spinlock.h      | 20
-rw-r--r-- | arch/ia64/include/asm/topology.h      |  7
-rw-r--r-- | arch/ia64/include/uapi/asm/Kbuild     |  1
-rw-r--r-- | arch/ia64/kernel/asm-offsets.c        |  6
-rw-r--r-- | arch/ia64/kernel/fsys.S               |  8
-rw-r--r-- | arch/ia64/kernel/fsyscall_gtod_data.h | 10
-rw-r--r-- | arch/ia64/kernel/mca.c                |  8
-rw-r--r-- | arch/ia64/kernel/salinfo.c            |  5
-rw-r--r-- | arch/ia64/kernel/time.c               | 42
-rw-r--r-- | arch/ia64/sn/kernel/bte.c             | 12
-rw-r--r-- | arch/ia64/sn/kernel/bte_error.c       | 17
-rw-r--r-- | arch/ia64/sn/kernel/huberror.c        |  2
-rw-r--r-- | arch/ia64/sn/kernel/mca.c             |  5
18 files changed, 94 insertions(+), 95 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1efc444f5fa1..49583c5a5d44 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -47,7 +47,7 @@ config IA64
 	select ARCH_TASK_STRUCT_ALLOCATOR
 	select ARCH_THREAD_STACK_ALLOCATOR
 	select ARCH_CLOCKSOURCE_DATA
-	select GENERIC_TIME_VSYSCALL_OLD
+	select GENERIC_TIME_VSYSCALL
 	select SYSCTL_ARCH_UNALIGN_NO_WARN
 	select HAVE_MOD_ARCH_SPECIFIC
 	select MODULES_USE_ELF_RELA
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 5da9421fb0ff..c1bab526a046 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -45,15 +45,4 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 	return daddr;
 }
 
-static inline void
-dma_cache_sync (struct device *dev, void *vaddr, size_t size,
-		enum dma_data_direction dir)
-{
-	/*
-	 * IA-64 is cache-coherent, so this is mostly a no-op. However, we do need to
-	 * ensure that dma_cache_sync() enforces order, hence the mb().
-	 */
-	mb();
-}
-
 #endif /* _ASM_IA64_DMA_MAPPING_H */
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index e20d77f6a3c1..b1d04e8bafc8 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -30,10 +30,6 @@ struct pci_vector_struct {
 #define PCIBIOS_MIN_IO		0x1000
 #define PCIBIOS_MIN_MEM		0x10000000
 
-void pcibios_config_init(void);
-
-struct pci_dev;
-
 /*
  * PCI_DMA_BUS_IS_PHYS should be set to 1 if there is _necessarily_ a direct
  * correspondence between device bus addresses and CPU physical addresses.
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index 7d6fceb3d567..917910607e0e 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -38,15 +38,31 @@
 /*
  * lock for reading
  */
-static inline void
-__down_read (struct rw_semaphore *sem)
+static inline int
+___down_read (struct rw_semaphore *sem)
 {
 	long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
 
-	if (result < 0)
+	return (result < 0);
+}
+
+static inline void
+__down_read (struct rw_semaphore *sem)
+{
+	if (___down_read(sem))
 		rwsem_down_read_failed(sem);
 }
 
+static inline int
+__down_read_killable (struct rw_semaphore *sem)
+{
+	if (___down_read(sem))
+		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
+			return -EINTR;
+
+	return 0;
+}
+
 /*
  * lock for writing
  */
@@ -73,9 +89,10 @@ __down_write (struct rw_semaphore *sem)
 static inline int
 __down_write_killable (struct rw_semaphore *sem)
 {
-	if (___down_write(sem))
+	if (___down_write(sem)) {
 		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
 			return -EINTR;
+	}
 
 	return 0;
 }
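The rwsem.h hunk above factors the reader fast path out into ___down_read() so the new __down_read_killable() can share it: the fetch-and-add either takes the lock outright or, on a negative count, falls back to a sleeping slow path whose killable variant reports -EINTR. A minimal userspace model of that control flow; the toy_* names and the simplified count encoding are illustrative, not the kernel's:

```c
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

/* Toy rwsem: count >= 0 means readers only; negative means a writer holds it. */
struct toy_rwsem { _Atomic long count; };

/* Fast path: fetch-and-add; a negative prior value means take the slow path. */
static int toy_down_read_fastpath_failed(struct toy_rwsem *sem)
{
	long result = atomic_fetch_add_explicit(&sem->count, 1,
						memory_order_acquire);
	return result < 0;
}

/* Stub standing in for rwsem_down_read_failed_killable(): pretend a fatal
 * signal arrived while sleeping, so an ERR_PTR-style value comes back. */
static void *toy_down_read_failed_killable(struct toy_rwsem *sem)
{
	(void)sem;
	return (void *)(long)-EINTR;
}

/* Same trick as the kernel's IS_ERR(): error codes live in the top page. */
#define TOY_IS_ERR(p) ((unsigned long)(p) >= (unsigned long)-4095)

static int toy_down_read_killable(struct toy_rwsem *sem)
{
	if (toy_down_read_fastpath_failed(sem))
		if (TOY_IS_ERR(toy_down_read_failed_killable(sem)))
			return -EINTR;
	return 0;
}

int main(void)
{
	struct toy_rwsem sem = { .count = 0 };
	printf("uncontended: %d\n", toy_down_read_killable(&sem)); /* 0 */
	atomic_store(&sem.count, -1);  /* simulate a writer holding the lock */
	printf("contended:   %d\n", toy_down_read_killable(&sem)); /* -4 (-EINTR) */
	return 0;
}
```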
diff --git a/arch/ia64/include/asm/sn/bte.h b/arch/ia64/include/asm/sn/bte.h
index cc6c4dbf53af..cd71ab5faf62 100644
--- a/arch/ia64/include/asm/sn/bte.h
+++ b/arch/ia64/include/asm/sn/bte.h
@@ -17,6 +17,8 @@
 #include <asm/sn/types.h>
 #include <asm/sn/shub_mmr.h>
 
+struct nodepda_s;
+
 #define IBCT_NOTIFY		(0x1UL << 4)
 #define IBCT_ZFIL_MODE		(0x1UL << 0)
@@ -210,7 +212,7 @@ struct bteinfo_s {
  */
 extern bte_result_t bte_copy(u64, u64, u64, u64, void *);
 extern bte_result_t bte_unaligned_copy(u64, u64, u64, u64);
-extern void bte_error_handler(unsigned long);
+extern void bte_error_handler(struct nodepda_s *);
 
 #define bte_zero(dest, len, mode, notification) \
 	bte_copy(0, dest, len, ((mode) | BTE_ZERO_FILL), notification)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index aa057abd948e..afd0b3121b4c 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -62,7 +62,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
-	int tmp = ACCESS_ONCE(lock->lock);
+	int tmp = READ_ONCE(lock->lock);
 
 	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
 		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
@@ -74,19 +74,19 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
 
 	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
-	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+	WRITE_ONCE(*p, (tmp + 2) & ~1);
 }
 
 static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock);
+	long tmp = READ_ONCE(lock->lock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
 static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock);
+	long tmp = READ_ONCE(lock->lock);
 
 	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
@@ -127,9 +127,7 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 {
 	arch_spin_lock(lock);
 }
-
-#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
-#define arch_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
+#define arch_spin_lock_flags	arch_spin_lock_flags
 
 #ifdef ASM_SUPPORTED
 
@@ -157,6 +155,7 @@ arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 		: "p6", "p7", "r2", "memory");
 }
 
+#define arch_read_lock_flags arch_read_lock_flags
 #define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
 
 #else /* !ASM_SUPPORTED */
@@ -209,6 +208,7 @@ arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
 }
 
+#define arch_write_lock_flags arch_write_lock_flags
 #define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
 
 #define arch_write_trylock(rw)							\
@@ -232,8 +232,6 @@ static inline void arch_write_unlock(arch_rwlock_t *x)
 
 #else /* !ASM_SUPPORTED */
 
-#define arch_write_lock_flags(l, flags) arch_write_lock(l)
-
 #define arch_write_lock(l)								\
 ({											\
 	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
@@ -273,8 +271,4 @@ static inline int arch_read_trylock(arch_rwlock_t *x)
 	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
 }
 
-#define arch_spin_relax(lock)	cpu_relax()
-#define arch_read_relax(lock)	cpu_relax()
-#define arch_write_relax(lock)	cpu_relax()
-
 #endif /* _ASM_IA64_SPINLOCK_H */
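The spinlock.h hunks above replace ACCESS_ONCE() with READ_ONCE()/WRITE_ONCE() as part of the tree-wide retirement of ACCESS_ONCE(), which misbehaved on non-scalar types. A rough userspace sketch of what the ticket-lock reads rely on, namely a volatile-qualified access that forces exactly one load or store; the TOY_* macros are simplified stand-ins for the kernel's, and __typeof__ needs GCC or Clang:

```c
#include <stdio.h>

/* One-shot accessors: the volatile cast keeps the compiler from fusing,
 * tearing, or re-reading the access. Simplified from the kernel macros. */
#define TOY_READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define TOY_WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

/* Ticket layout constants matching the ia64 header above */
#define TICKET_SHIFT 17
#define TICKET_BITS  15
#define TICKET_MASK  ((1 << TICKET_BITS) - 1)

struct toy_spinlock { unsigned int lock; }; /* next ticket | now serving */

/* Locked when the "next ticket" and "now serving" halves differ. */
static int toy_spin_is_locked(struct toy_spinlock *l)
{
	long tmp = TOY_READ_ONCE(l->lock);
	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

int main(void)
{
	struct toy_spinlock l = { 0 };
	printf("locked? %d\n", toy_spin_is_locked(&l));  /* 0 */
	TOY_WRITE_ONCE(l.lock, 1u << TICKET_SHIFT);      /* next != serving */
	printf("locked? %d\n", toy_spin_is_locked(&l));  /* 1 */
	return 0;
}
```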
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 3ad8f6988363..82f9bf702804 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -34,13 +34,6 @@
 				&node_to_cpu_mask[node])
 
 /*
- * Returns the number of the node containing Node 'nid'.
- * Not implemented here. Multi-level hierarchies detected with
- * the help of node_distance().
- */
-#define parent_node(nid) (nid)
-
-/*
  * Determines the node for a given pci bus
  */
 #define pcibus_to_node(bus) PCI_CONTROLLER(bus)->node
diff --git a/arch/ia64/include/uapi/asm/Kbuild b/arch/ia64/include/uapi/asm/Kbuild
index 13a97aa2285f..f5c6967a93bb 100644
--- a/arch/ia64/include/uapi/asm/Kbuild
+++ b/arch/ia64/include/uapi/asm/Kbuild
@@ -1,4 +1,5 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += bpf_perf_event.h
 generic-y += kvm_para.h
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index b385ff2bf6ce..f4db2168d1b8 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -31,8 +31,8 @@ void foo(void)
 	DEFINE(SIGFRAME_SIZE, sizeof (struct sigframe));
 	DEFINE(UNW_FRAME_INFO_SIZE, sizeof (struct unw_frame_info));
 
-	BUILD_BUG_ON(sizeof(struct upid) != 32);
-	DEFINE(IA64_UPID_SHIFT, 5);
+	BUILD_BUG_ON(sizeof(struct upid) != 16);
+	DEFINE(IA64_UPID_SHIFT, 4);
 
 	BLANK();
 
@@ -212,6 +212,8 @@ void foo(void)
 	BLANK();
 
 	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
+	DEFINE(IA64_TIME_SN_SPEC_SNSEC_OFFSET,
+	       offsetof (struct time_sn_spec, snsec));
 
 	DEFINE(CLONE_SETTLS_BIT, 19);
 #if CLONE_SETTLS != (1<<19)
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index c0e7c9af2bb9..fe742ffafc7a 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -236,9 +236,9 @@ ENTRY(fsys_gettimeofday)
	MOV_FROM_ITC(p8, p6, r2, r10)	// CPU_TIMER. 36 clocks latency!!!
(p9)	ld8 r2 = [r30]		// MMIO_TIMER. Could also have latency issues..
(p13)	ld8 r25 = [r19]		// get itc_lastcycle value
-	ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_sec
+	ld8 r9 = [r22],IA64_TIME_SN_SPEC_SNSEC_OFFSET	// sec
 	;;
-	ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_nsec
+	ld8 r8 = [r22],-IA64_TIME_SN_SPEC_SNSEC_OFFSET	// snsec
(p13)	sub r3 = r25,r2	// Diff needed before comparison (thanks davidm)
 	;;
(p13)	cmp.gt.unc p6,p7 = r3,r0	// check if it is less than last. p6,p7 cleared
@@ -266,9 +266,9 @@ EX(.fail_efault, probe.w.fault r31, 3)
 	mf
 	;;
 	ld4 r10 = [r20]		// gtod_lock.sequence
-	shr.u r2 = r2,r23	// shift by factor
-	;;
 	add r8 = r8,r2		// Add xtime.nsecs
+	;;
+	shr.u r8 = r8,r23	// shift by factor
 	cmp4.ne p7,p0 = r28,r10
(p7)	br.cond.dpnt.few .time_redo	// sequence number changed, redo
 	// End critical section.
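The reordered fsys.S sequence (add r8 = r8,r2 before shr.u r8 = r8,r23) works because, with GENERIC_TIME_VSYSCALL, r8 now holds shifted nanoseconds: the cycles*mult product is added at full precision and a single final right shift converts the sum to nanoseconds, instead of truncating the product first. A sketch of the arithmetic with made-up mult/shift values (not real ITC parameters):

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative clocksource scaling parameters: ns = (cycles * mult) >> shift */
#define CLK_MULT  2796203u
#define CLK_SHIFT 22u

int main(void)
{
	uint64_t base_ns  = 123456789;               /* whole ns at last update */
	uint64_t base_fra = 1u << (CLK_SHIFT - 1);   /* plus half a nanosecond */
	uint64_t snsec    = (base_ns << CLK_SHIFT) + base_fra; /* xtime_nsec */
	uint64_t cycles   = 1000;                    /* ITC delta since update */

	/* Old sequence: shift the product first, then add plain nanoseconds,
	 * so the fractional parts of both terms are discarded. */
	uint64_t old_ns = base_ns + ((cycles * CLK_MULT) >> CLK_SHIFT);

	/* New sequence, as in "add r8 = r8,r2 ;; shr.u r8 = r8,r23" above:
	 * add in shifted units, then shift once, keeping full precision. */
	uint64_t new_ns = (snsec + cycles * CLK_MULT) >> CLK_SHIFT;

	printf("old: %" PRIu64 " ns\n", old_ns);  /* 123457455 */
	printf("new: %" PRIu64 " ns\n", new_ns);  /* 123457456 */
	return 0;
}
```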
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
index 0914c02a1eb0..cc2861445965 100644
--- a/arch/ia64/kernel/fsyscall_gtod_data.h
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -6,10 +6,16 @@
  *		fsyscall gettimeofday data
  */
 
+/* like timespec, but includes "shifted nanoseconds" */
+struct time_sn_spec {
+	u64	sec;
+	u64	snsec;
+};
+
 struct fsyscall_gtod_data_t {
 	seqcount_t	seq;
-	struct timespec	wall_time;
-	struct timespec monotonic_time;
+	struct time_sn_spec wall_time;
+	struct time_sn_spec monotonic_time;
 	u64		clk_mask;
 	u32		clk_mult;
 	u32		clk_shift;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 555b11180156..6115464d5f03 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1513,7 +1513,7 @@ ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
  *
  */
 static void
-ia64_mca_cmc_poll (unsigned long dummy)
+ia64_mca_cmc_poll (struct timer_list *unused)
 {
 	/* Trigger a CMC interrupt cascade  */
 	platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
@@ -1590,7 +1590,7 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
  *
  */
 static void
-ia64_mca_cpe_poll (unsigned long dummy)
+ia64_mca_cpe_poll (struct timer_list *unused)
 {
 	/* Trigger a CPE interrupt cascade  */
 	platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
@@ -2098,7 +2098,7 @@ ia64_mca_late_init(void)
 		return 0;
 
 	/* Setup the CMCI/P vector and handler */
-	setup_timer(&cmc_poll_timer, ia64_mca_cmc_poll, 0UL);
+	timer_setup(&cmc_poll_timer, ia64_mca_cmc_poll, 0);
 
 	/* Unmask/enable the vector */
 	cmc_polling_enabled = 0;
@@ -2109,7 +2109,7 @@ ia64_mca_late_init(void)
 #ifdef CONFIG_ACPI
 	/* Setup the CPEI/P vector and handler */
 	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
-	setup_timer(&cpe_poll_timer, ia64_mca_cpe_poll, 0UL);
+	timer_setup(&cpe_poll_timer, ia64_mca_cpe_poll, 0);
 
 	{
 		unsigned int irq;
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 63dc9cdc95c5..52c404b08904 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -263,7 +263,7 @@ salinfo_timeout_check(struct salinfo_data *data)
 }
 
 static void
-salinfo_timeout (unsigned long arg)
+salinfo_timeout(struct timer_list *unused)
 {
 	ia64_mlogbuf_dump();
 	salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA);
@@ -623,9 +623,8 @@ salinfo_init(void)
 
 	*sdir++ = salinfo_dir;
 
-	init_timer(&salinfo_timer);
+	timer_setup(&salinfo_timer, salinfo_timeout, 0);
 	salinfo_timer.expires = jiffies + SALINFO_TIMER_DELAY;
-	salinfo_timer.function = &salinfo_timeout;
 	add_timer(&salinfo_timer);
 
 	i = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/salinfo:online",
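fsyscall_gtod_data_t keeps its seqcount_t so fsys_gettimeofday can read the sec/snsec pair locklessly and retry when update_vsyscall() races with it (the ".time_redo" branch in the fsys.S hunk above). A toy model of that reader protocol, with simplified memory ordering and hypothetical toy_* names:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Seqcount-protected time data: writers bump seq to odd before updating
 * sec/snsec and to even afterwards; readers retry on any change. */
struct toy_gtod {
	_Atomic unsigned seq;
	uint64_t sec;
	uint64_t snsec;
};

static void toy_read(struct toy_gtod *g, uint64_t *sec, uint64_t *snsec)
{
	unsigned start;
	do {
		/* odd sequence: a writer is mid-update, wait it out */
		while ((start = atomic_load_explicit(&g->seq,
						     memory_order_acquire)) & 1)
			;
		*sec   = g->sec;
		*snsec = g->snsec;
		atomic_thread_fence(memory_order_acquire);
		/* sequence changed while reading: redo */
	} while (atomic_load_explicit(&g->seq, memory_order_relaxed) != start);
}

int main(void)
{
	struct toy_gtod g = { .seq = 0, .sec = 1700000000, .snsec = 0 };
	uint64_t s, sn;
	toy_read(&g, &s, &sn);
	printf("sec=%llu snsec=%llu\n",
	       (unsigned long long)s, (unsigned long long)sn);
	return 0;
}
```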
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index aa7be020a904..9025699049ca 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -88,7 +88,7 @@ void vtime_flush(struct task_struct *tsk)
 	}
 
 	if (ti->softirq_time) {
-		delta = cycle_to_nsec(ti->softirq_time));
+		delta = cycle_to_nsec(ti->softirq_time);
 		account_system_index_time(tsk, delta, CPUTIME_SOFTIRQ);
 	}
 
@@ -430,30 +430,32 @@ void update_vsyscall_tz(void)
 {
 }
 
-void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
-			 struct clocksource *c, u32 mult, u64 cycle_last)
+void update_vsyscall(struct timekeeper *tk)
 {
 	write_seqcount_begin(&fsyscall_gtod_data.seq);
 
-	/* copy fsyscall clock data */
-	fsyscall_gtod_data.clk_mask = c->mask;
-	fsyscall_gtod_data.clk_mult = mult;
-	fsyscall_gtod_data.clk_shift = c->shift;
-	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
-	fsyscall_gtod_data.clk_cycle_last = cycle_last;
-
-	/* copy kernel time structures */
-	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
-	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
-	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
-							+ wall->tv_sec;
-	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
-							+ wall->tv_nsec;
+	/* copy vsyscall data */
+	fsyscall_gtod_data.clk_mask = tk->tkr_mono.mask;
+	fsyscall_gtod_data.clk_mult = tk->tkr_mono.mult;
+	fsyscall_gtod_data.clk_shift = tk->tkr_mono.shift;
+	fsyscall_gtod_data.clk_fsys_mmio = tk->tkr_mono.clock->archdata.fsys_mmio;
+	fsyscall_gtod_data.clk_cycle_last = tk->tkr_mono.cycle_last;
+
+	fsyscall_gtod_data.wall_time.sec = tk->xtime_sec;
+	fsyscall_gtod_data.wall_time.snsec = tk->tkr_mono.xtime_nsec;
+
+	fsyscall_gtod_data.monotonic_time.sec = tk->xtime_sec
+					+ tk->wall_to_monotonic.tv_sec;
+	fsyscall_gtod_data.monotonic_time.snsec = tk->tkr_mono.xtime_nsec
+					+ ((u64)tk->wall_to_monotonic.tv_nsec
+						<< tk->tkr_mono.shift);
 
 	/* normalize */
-	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
-		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
-		fsyscall_gtod_data.monotonic_time.tv_sec++;
+	while (fsyscall_gtod_data.monotonic_time.snsec >=
+			(((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+		fsyscall_gtod_data.monotonic_time.snsec -=
+			((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
+		fsyscall_gtod_data.monotonic_time.sec++;
 	}
 
 	write_seqcount_end(&fsyscall_gtod_data.seq);
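The normalization loop added to update_vsyscall() above carries whole seconds out of monotonic_time.snsec, where one second equals NSEC_PER_SEC << clk_shift in shifted units. The same arithmetic standalone; the shift value here is illustrative, not a real clocksource parameter:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Carry whole seconds out of a shifted-nanosecond field, mirroring the
 * while loop in update_vsyscall() above. */
static void normalize_snsec(uint64_t *sec, uint64_t *snsec, uint32_t shift)
{
	const uint64_t one_sec = NSEC_PER_SEC << shift;

	while (*snsec >= one_sec) {
		*snsec -= one_sec;
		(*sec)++;
	}
}

int main(void)
{
	uint32_t shift = 8;
	uint64_t sec = 100;
	/* 2.5 seconds' worth of shifted nanoseconds */
	uint64_t snsec = (NSEC_PER_SEC * 5 / 2) << shift;

	normalize_snsec(&sec, &snsec, shift);
	printf("sec=%" PRIu64 " ns=%" PRIu64 "\n",
	       sec, snsec >> shift);  /* sec=102 ns=500000000 */
	return 0;
}
```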
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index b2eb48490754..9146192b86f5 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -219,7 +219,7 @@ retry_bteop:
 			   BTE_LNSTAT_LOAD(bte), *bte->most_rcnt_na) );
 		bte->bte_error_count++;
 		bte->bh_error = IBLS_ERROR;
-		bte_error_handler((unsigned long)NODEPDA(bte->bte_cnode));
+		bte_error_handler(NODEPDA(bte->bte_cnode));
 		*bte->most_rcnt_na = BTE_WORD_AVAILABLE;
 		goto retry_bteop;
 	}
@@ -414,6 +414,12 @@ EXPORT_SYMBOL(bte_unaligned_copy);
 * Block Transfer Engine initialization functions.
 *
 ***********************************************************************/
+static void bte_recovery_timeout(struct timer_list *t)
+{
+	struct nodepda_s *nodepda = from_timer(nodepda, t, bte_recovery_timer);
+
+	bte_error_handler(nodepda);
+}
 
 /*
  * bte_init_node(nodepda, cnode)
@@ -436,9 +442,7 @@ void bte_init_node(nodepda_t * mynodepda, cnodeid_t cnode)
 	 * will point at this one bte_recover structure to get the lock.
 	 */
 	spin_lock_init(&mynodepda->bte_recovery_lock);
-	init_timer(&mynodepda->bte_recovery_timer);
-	mynodepda->bte_recovery_timer.function = bte_error_handler;
-	mynodepda->bte_recovery_timer.data = (unsigned long)mynodepda;
+	timer_setup(&mynodepda->bte_recovery_timer, bte_recovery_timeout, 0);
 
 	for (i = 0; i < BTES_PER_NODE; i++) {
 		u64 *base_addr;
diff --git a/arch/ia64/sn/kernel/bte_error.c b/arch/ia64/sn/kernel/bte_error.c
index 4cb09f3f1efc..d92786c09b34 100644
--- a/arch/ia64/sn/kernel/bte_error.c
+++ b/arch/ia64/sn/kernel/bte_error.c
@@ -27,15 +27,12 @@
 * transfers to be queued.
 */
 
-void bte_error_handler(unsigned long);
-
 /*
 * Wait until all BTE related CRBs are completed
 * and then reset the interfaces.
 */
-int shub1_bte_error_handler(unsigned long _nodepda)
+static int shub1_bte_error_handler(struct nodepda_s *err_nodepda)
 {
-	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
 	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
 	nasid_t nasid;
 	int i;
@@ -131,9 +128,8 @@ int shub1_bte_error_handler(unsigned long _nodepda)
 * Wait until all BTE related CRBs are completed
 * and then reset the interfaces.
 */
-int shub2_bte_error_handler(unsigned long _nodepda)
+static int shub2_bte_error_handler(struct nodepda_s *err_nodepda)
 {
-	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
 	struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
 	struct bteinfo_s *bte;
 	nasid_t nasid;
@@ -170,9 +166,8 @@ int shub2_bte_error_handler(unsigned long _nodepda)
 * Wait until all BTE related CRBs are completed
 * and then reset the interfaces.
 */
-void bte_error_handler(unsigned long _nodepda)
+void bte_error_handler(struct nodepda_s *err_nodepda)
 {
-	struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
 	spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
 	int i;
 	unsigned long irq_flags;
@@ -199,12 +194,12 @@ void bte_error_handler(unsigned long _nodepda)
 	}
 
 	if (is_shub1()) {
-		if (shub1_bte_error_handler(_nodepda)) {
+		if (shub1_bte_error_handler(err_nodepda)) {
 			spin_unlock_irqrestore(recovery_lock, irq_flags);
 			return;
 		}
 	} else {
-		if (shub2_bte_error_handler(_nodepda)) {
+		if (shub2_bte_error_handler(err_nodepda)) {
 			spin_unlock_irqrestore(recovery_lock, irq_flags);
 			return;
 		}
@@ -255,6 +250,6 @@ bte_crb_error_handler(cnodeid_t cnode, int btenum,
 	BTE_PRINTK(("Got an error on cnode %d bte %d: HW error type 0x%x\n",
 		bte->bte_cnode, bte->bte_num, ioe->ie_errortype));
 
-	bte_error_handler((unsigned long) NODEPDA(cnode));
+	bte_error_handler(NODEPDA(cnode));
 }
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index f925dec2da92..97fa56dddf50 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -50,7 +50,7 @@ static irqreturn_t hub_eint_handler(int irq, void *arg)
 		if ((int)ret_stuff.v0)
 			panic("%s: Fatal TIO Error", __func__);
 	} else
-		bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid)));
+		bte_error_handler(NODEPDA(nasid_to_cnodeid(nasid)));
 
 	return IRQ_HANDLED;
 }
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c
index 5b799d4deb74..bc3bd930c74c 100644
--- a/arch/ia64/sn/kernel/mca.c
+++ b/arch/ia64/sn/kernel/mca.c
@@ -72,7 +72,7 @@ static void sn_cpei_handler(int irq, void *devid, struct pt_regs *regs)
 	ia64_sn_plat_cpei_handler();
 }
 
-static void sn_cpei_timer_handler(unsigned long dummy)
+static void sn_cpei_timer_handler(struct timer_list *unused)
 {
 	sn_cpei_handler(-1, NULL, NULL);
 	mod_timer(&sn_cpei_timer, jiffies + CPEI_INTERVAL);
@@ -80,9 +80,8 @@ static void sn_cpei_timer_handler(unsigned long dummy)
 
 void sn_init_cpei_timer(void)
 {
-	init_timer(&sn_cpei_timer);
+	timer_setup(&sn_cpei_timer, sn_cpei_timer_handler, 0);
 	sn_cpei_timer.expires = jiffies + CPEI_INTERVAL;
-	sn_cpei_timer.function = sn_cpei_timer_handler;
 	add_timer(&sn_cpei_timer);
 }
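The timer hunks in mca.c, salinfo.c, sn/kernel/bte.c, and sn/kernel/mca.c all follow one pattern from the tree-wide timer conversion: callbacks take a struct timer_list * and recover their containing structure with from_timer(), a container_of() wrapper, instead of an unsigned long data cookie. A userspace model of the pattern; the toy_* types and toy_timer_setup() are stand-ins, while from_timer() mirrors the kernel macro (GCC/Clang __typeof__ required):

```c
#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in for the kernel's timer: just the new-style callback. */
struct timer_list {
	void (*function)(struct timer_list *);
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
/* from_timer() is container_of() specialized for timer callbacks */
#define from_timer(var, timer, field) \
	container_of(timer, __typeof__(*var), field)

/* Mirrors nodepda_s embedding bte_recovery_timer */
struct toy_nodepda {
	int bte_error_count;
	struct timer_list bte_recovery_timer;
};

/* New-style callback: receives the timer, derives the containing struct,
 * as bte_recovery_timeout() does above. */
static void toy_recovery_timeout(struct timer_list *t)
{
	struct toy_nodepda *nodepda = from_timer(nodepda, t, bte_recovery_timer);

	printf("recovering node, error count %d\n", nodepda->bte_error_count);
}

static void toy_timer_setup(struct timer_list *timer,
			    void (*fn)(struct timer_list *))
{
	timer->function = fn;
}

int main(void)
{
	struct toy_nodepda pda = { .bte_error_count = 3 };

	toy_timer_setup(&pda.bte_recovery_timer, toy_recovery_timeout);
	/* simulate the timer firing */
	pda.bte_recovery_timer.function(&pda.bte_recovery_timer);
	return 0;
}
```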