Diffstat (limited to 'arch/ia64')
 arch/ia64/include/asm/paravirt_privop.h |  2 --
 arch/ia64/include/asm/smp.h             |  2 +-
 arch/ia64/include/asm/unistd.h          |  4 +++-
 arch/ia64/kernel/cyclone.c              |  2 +-
 arch/ia64/kernel/entry.S                |  2 ++
 arch/ia64/kernel/pci-dma.c              |  6 +++---
 arch/ia64/kernel/pci-swiotlb.c          |  2 +-
 arch/ia64/kernel/smp.c                  | 14 ++++++--------
 arch/ia64/kernel/time.c                 |  4 ++--
 arch/ia64/kvm/kvm-ia64.c                | 14 ++++++++------
 arch/ia64/sn/kernel/sn2/timer.c         |  2 +-
 arch/ia64/sn/pci/pci_dma.c              |  2 +-
 12 files changed, 29 insertions(+), 27 deletions(-)
diff --git a/arch/ia64/include/asm/paravirt_privop.h b/arch/ia64/include/asm/paravirt_privop.h
index 3d2951130b5f..8f6cb11c9fae 100644
--- a/arch/ia64/include/asm/paravirt_privop.h
+++ b/arch/ia64/include/asm/paravirt_privop.h
@@ -445,7 +445,6 @@ paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
register unsigned long ia64_intri_res asm ("r8"); \
register unsigned long __reg asm ("r8") = (reg); \
\
- BUILD_BUG_ON(!__builtin_constant_p(reg)); \
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(GETREG) \
+ (reg)) \
@@ -464,7 +463,6 @@ paravirt_set_rr0_to_rr4(unsigned long val0, unsigned long val1,
register unsigned long ia64_clobber1 asm ("r8"); \
register unsigned long ia64_clobber2 asm ("r9"); \
\
- BUILD_BUG_ON(!__builtin_constant_p(reg)); \
asm volatile (paravirt_alt_bundle(__PARAVIRT_BR, \
PARAVIRT_TYPE(SETREG) \
+ (reg)) \
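The two BUILD_BUG_ON(!__builtin_constant_p(reg)) assertions are dropped, presumably because __builtin_constant_p() is only guaranteed to fold to 1 late in optimization: at low optimization levels, or before inlining, gcc can report 0 for an argument that every caller in fact passes as a literal, turning the assertion into a spurious build failure. A distilled sketch of the failure mode, using the classic negative-array-size definition of BUILD_BUG_ON (the wrapper function is hypothetical):

	/* sizeof(char[-1]) is a compile error when cond is true */
	#define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

	static inline unsigned long getreg_checked(int reg)	/* hypothetical */
	{
		/*
		 * Before this function is inlined, gcc may evaluate
		 * __builtin_constant_p(reg) to 0 even though every
		 * caller passes a literal -- failing the build.
		 */
		BUILD_BUG_ON(!__builtin_constant_p(reg));
		return 0;
	}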
diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h
index 598408336251..d217d1d4e051 100644
--- a/arch/ia64/include/asm/smp.h
+++ b/arch/ia64/include/asm/smp.h
@@ -58,7 +58,7 @@ extern struct smp_boot_data {
extern char no_int_routing __devinitdata;
extern cpumask_t cpu_core_map[NR_CPUS];
-DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
+DECLARE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
extern int smp_num_siblings;
extern void __iomem *ipi_base_addr;
extern unsigned char smp_int_redirect;
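The declaration is switched to DECLARE_PER_CPU_SHARED_ALIGNED so it agrees with the variable's definition (declaring a per-CPU variable in a different section from its definition can trigger a section conflict) and keeps this hot, SMP-shared mask on its own cacheline to avoid false sharing. Roughly what the aligned variant adds over plain DECLARE_PER_CPU, simplified from the percpu headers of this era:

	/* simplified: the real macros go through DECLARE_PER_CPU_SECTION */
	#define DECLARE_PER_CPU(type, name)			\
		extern __typeof__(type) per_cpu__##name
	#define DECLARE_PER_CPU_SHARED_ALIGNED(type, name)	\
		DECLARE_PER_CPU(type, name) ____cacheline_aligned_in_smp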
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 9015979ebe0f..10a9eb05f74d 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -308,11 +308,13 @@
#define __NR_dup3 1316
#define __NR_pipe2 1317
#define __NR_inotify_init1 1318
+#define __NR_preadv 1319
+#define __NR_pwritev 1320
#ifdef __KERNEL__
-#define NR_syscalls 295 /* length of syscall table */
+#define NR_syscalls 297 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
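These are the vectored positional I/O syscalls added across all architectures in 2.6.30: preadv()/pwritev() behave like readv()/writev() but take an explicit file offset and leave the file position untouched. A self-contained userspace sketch (the path and buffer sizes are arbitrary):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/uio.h>
	#include <unistd.h>

	int main(void)
	{
		char hdr[4], body[8];
		struct iovec iov[2] = {
			{ .iov_base = hdr,  .iov_len = sizeof(hdr)  },
			{ .iov_base = body, .iov_len = sizeof(body) },
		};
		int fd = open("/tmp/t", O_RDONLY);
		if (fd < 0)
			return 1;
		/* scatter-read 12 bytes at offset 16; the file
		 * position is not advanced */
		ssize_t n = preadv(fd, iov, 2, 16);
		printf("read %zd bytes\n", n);
		close(fd);
		return 0;
	}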
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 790ef0d87e12..71e35864d2e2 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -21,7 +21,7 @@ void __init cyclone_setup(void)
static void __iomem *cyclone_mc;
-static cycle_t read_cyclone(void)
+static cycle_t read_cyclone(struct clocksource *cs)
{
return (cycle_t)readq((void __iomem *)cyclone_mc);
}
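This and the matching itc_get_cycles()/read_sn2() changes below track the 2.6.30 clocksource API, which now passes the clocksource itself to ->read() so that one callback can serve several instances (via container_of()). How such a callback is wired up, with the non-essential fields of the real cyclone clocksource omitted:

	static cycle_t read_cyclone(struct clocksource *cs)
	{
		/* cs is unused here; a shared callback would use
		 * container_of(cs, ...) to find per-instance state */
		return (cycle_t)readq((void __iomem *)cyclone_mc);
	}

	static struct clocksource clocksource_cyclone = {
		.name = "cyclone",
		.read = read_cyclone,
		/* .rating, .mask, .mult, .shift, .flags omitted */
	};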
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 8dc69669586a..7bebac0e1d44 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1803,6 +1803,8 @@ sys_call_table:
data8 sys_dup3
data8 sys_pipe2
data8 sys_inotify_init1
+ data8 sys_preadv
+ data8 sys_pwritev // 1320
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
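The .org directive that follows the table is the size guard its comment refers to: it pins the location counter to exactly 8*NR_syscalls bytes past sys_call_table, and the assembler refuses to move the counter backwards, so adding entries (as above) without also bumping NR_syscalls in unistd.h fails the build. Were the table written in C, the same invariant would read something like:

	/* hypothetical C analogue of the .org size guard */
	BUILD_BUG_ON(ARRAY_SIZE(sys_call_table) != NR_syscalls);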
diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
index e4cb443bb988..eb987386f691 100644
--- a/arch/ia64/kernel/pci-dma.c
+++ b/arch/ia64/kernel/pci-dma.c
@@ -37,7 +37,7 @@ int force_iommu __read_mostly;
to i386. */
struct device fallback_dev = {
.init_name = "fallback device",
- .coherent_dma_mask = DMA_32BIT_MASK,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
.dma_mask = &fallback_dev.coherent_dma_mask,
};
@@ -75,7 +75,7 @@ int iommu_dma_supported(struct device *dev, u64 mask)
/* Copied from i386. Doesn't make much sense, because it will
only work for pci_alloc_coherent.
The caller just has to use GFP_DMA in this case. */
- if (mask < DMA_24BIT_MASK)
+ if (mask < DMA_BIT_MASK(24))
return 0;
/* Tell the device to use SAC when IOMMU force is on. This
@@ -90,7 +90,7 @@ int iommu_dma_supported(struct device *dev, u64 mask)
SAC for these. Assume all masks <= 40 bits are of this
type. Normally this doesn't make any difference, but gives
more gentle handling of IOMMU overflow. */
- if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
+ if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
dev_info(dev, "Force SAC with mask %lx\n", mask);
return 0;
}
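All the DMA_nnBIT_MASK conversions in this series are the same mechanical substitution: the named constants were being phased out in favor of DMA_BIT_MASK(n), which derives the mask from the bit count. Its definition in include/linux/dma-mapping.h special-cases 64, since (1ULL << 64) - 1 would be undefined behavior:

	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))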
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 573f02c39a00..285aae8431c6 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -16,7 +16,7 @@ EXPORT_SYMBOL(swiotlb);
static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
- if (dev->coherent_dma_mask != DMA_64BIT_MASK)
+ if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
gfp |= GFP_DMA;
return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
}
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 2ea4199d9c57..5230eaafd83f 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -225,6 +225,7 @@ smp_send_reschedule (int cpu)
{
platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
/*
* Called with preemption disabled.
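smp_send_reschedule() just raises IA64_IPI_RESCHEDULE on the target CPU; the new GPL export makes it callable from modules, presumably for KVM, whose vcpu-kick path needs to interrupt a remote CPU. Module-side usage would look like:

	/* hypothetical module code: make `cpu` re-evaluate its run queue */
	if (cpu != smp_processor_id())
		smp_send_reschedule(cpu);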
@@ -300,15 +301,12 @@ smp_flush_tlb_mm (struct mm_struct *mm)
return;
}
+ smp_call_function_mask(mm->cpu_vm_mask,
+ (void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
+ local_irq_disable();
+ local_finish_flush_tlb_mm(mm);
+ local_irq_enable();
preempt_enable();
- /*
- * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
- * have been running in the address space. It's not clear that this is worth the
- * trouble though: to avoid races, we have to raise the IPI on the target CPU
- * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
- * rather trivial.
- */
- on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
}
void arch_send_call_function_single_ipi(int cpu)
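The rewritten smp_flush_tlb_mm() implements exactly the optimization the deleted comment speculated about: rather than interrupting every CPU via on_each_cpu(), it IPIs only the CPUs recorded in mm->cpu_vm_mask. Because smp_call_function_mask() runs the callback on the CPUs in the mask except the caller, the local CPU is then flushed by hand, with interrupts disabled to match the environment the IPI handler runs in. The generic shape of the pattern (the helper name is illustrative, not a kernel API):

	static void run_on_mask_and_self(cpumask_t mask,
					 void (*fn)(void *), void *arg)
	{
		preempt_disable();
		smp_call_function_mask(mask, fn, arg, 1); /* remote CPUs */
		local_irq_disable();
		fn(arg);				  /* this CPU */
		local_irq_enable();
		preempt_enable();
	}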
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 641c8b61c4f1..604c1a35db33 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -33,7 +33,7 @@
#include "fsyscall_gtod_data.h"
-static cycle_t itc_get_cycles(void);
+static cycle_t itc_get_cycles(struct clocksource *cs);
struct fsyscall_gtod_data_t fsyscall_gtod_data = {
.lock = SEQLOCK_UNLOCKED,
@@ -383,7 +383,7 @@ ia64_init_itm (void)
}
}
-static cycle_t itc_get_cycles(void)
+static cycle_t itc_get_cycles(struct clocksource *cs)
{
u64 lcycle, now, ret;
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 28af6a731bb8..d20a5db4c4dd 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -610,20 +610,22 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
int r;
again:
- preempt_disable();
- local_irq_disable();
-
if (signal_pending(current)) {
- local_irq_enable();
- preempt_enable();
r = -EINTR;
kvm_run->exit_reason = KVM_EXIT_INTR;
goto out;
}
+ /*
+ * down_read() may sleep and return with interrupts enabled
+ */
+ down_read(&vcpu->kvm->slots_lock);
+
+ preempt_disable();
+ local_irq_disable();
+
vcpu->guest_mode = 1;
kvm_guest_enter();
- down_read(&vcpu->kvm->slots_lock);
r = vti_vcpu_run(vcpu, kvm_run);
if (r < 0) {
local_irq_enable();
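The reordering in __vcpu_run() fixes a sleep-in-atomic bug: down_read() on slots_lock may sleep, so it cannot be taken after preempt_disable()/local_irq_disable(). Moving the signal check first also removes the enable/disable dance the error path previously needed. Distilled to its essentials:

	/* old, broken: sleeping lock acquired in atomic context */
	preempt_disable();
	local_irq_disable();
	down_read(&vcpu->kvm->slots_lock);	/* may sleep -> bug */

	/* new: take the sleeping lock, then go atomic */
	down_read(&vcpu->kvm->slots_lock);
	preempt_disable();
	local_irq_disable();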
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index cf67fc562054..21d6f09e3447 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -23,7 +23,7 @@
extern unsigned long sn_rtc_cycles_per_second;
-static cycle_t read_sn2(void)
+static cycle_t read_sn2(struct clocksource *cs)
{
return (cycle_t)readq(RTC_COUNTER_ADDR);
}
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 8c130e8f00e1..d876423e4e75 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -349,7 +349,7 @@ static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
u64 sn_dma_get_required_mask(struct device *dev)
{
- return DMA_64BIT_MASK;
+ return DMA_BIT_MASK(64);
}
EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);