author     Christoph Hellwig <hch@lst.de>            2019-08-13 09:25:02 +0200
committer  Tony Luck <tony.luck@intel.com>           2019-08-16 11:33:57 -0700
commit     05933aac7b11911955de307a329dc2a7a14b7bd0 (patch)
tree       5fbf522b4adc70501eed8986930bc0744bbb7810 /arch/ia64/kernel
parent     ia64: remove support for the SGI SN2 platform (diff)
ia64: remove now unused machvec indirections
With the SGI SN2 machvec removal most of the indirections are unused now,
so remove them.  This includes the entire removal of the mmio read*/write*
macros, as the generic ones are identical to the asm-generic/io.h versions.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lkml.kernel.org/r/20190813072514.23299-17-hch@lst.de
Signed-off-by: Tony Luck <tony.luck@intel.com>
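
For readers unfamiliar with the machvec layer, the following is a minimal
sketch of the pattern being removed.  It is not taken from the kernel's real
headers: the struct layout is simplified and IA64_IPI_DM_INT is a placeholder
value.  The point is that platform_send_ipi was a macro resolving through a
per-platform function-pointer table (ia64_mv); with SN2 gone, the only
remaining implementation, ia64_send_ipi, can be called directly.

/*
 * Illustrative sketch only -- simplified, not the kernel's real headers.
 */

/* Placeholder stand-in for the real delivery-mode constant. */
#define IA64_IPI_DM_INT		0x0

/* Before: a per-platform operations table selected at boot ... */
struct ia64_machine_vector {
	void (*send_ipi)(int cpu, int vector, int delivery_mode, int redirect);
	/* ... many more hooks (timer_interrupt, cpu_init, MSI setup, ...) */
};

extern struct ia64_machine_vector ia64_mv;

/* ... and a macro layer so callers did not name the table directly. */
#define platform_send_ipi	ia64_mv.send_ipi

/* After: only one implementation is left, so callers use it by name. */
void ia64_send_ipi(int cpu, int vector, int delivery_mode, int redirect);

static void kick_cpu_example(int cpu, int vector)
{
	/* old: platform_send_ipi(cpu, vector, IA64_IPI_DM_INT, 0); */
	ia64_send_ipi(cpu, vector, IA64_IPI_DM_INT, 0);
}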
Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/iosapic.c        |  5
-rw-r--r--  arch/ia64/kernel/irq.c            | 12
-rw-r--r--  arch/ia64/kernel/irq_ia64.c       |  2
-rw-r--r--  arch/ia64/kernel/machine_kexec.c  |  1
-rw-r--r--  arch/ia64/kernel/machvec.c        |  7
-rw-r--r--  arch/ia64/kernel/mca.c            | 10
-rw-r--r--  arch/ia64/kernel/msi_ia64.c       | 21
-rw-r--r--  arch/ia64/kernel/sal.c            |  2
-rw-r--r--  arch/ia64/kernel/setup.c          |  1
-rw-r--r--  arch/ia64/kernel/smp.c            |  8
-rw-r--r--  arch/ia64/kernel/smpboot.c        |  2
-rw-r--r--  arch/ia64/kernel/time.c           |  2
12 files changed, 15 insertions(+), 58 deletions(-)
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index fe6e4946672e..9e49fd006859 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -67,10 +67,7 @@
* used as architecture-independent interrupt handling mechanism in Linux.
* As an IRQ is a number, we have to have
* IA-64 interrupt vector number <-> IRQ number mapping. On smaller
- * systems, we use one-to-one mapping between IA-64 vector and IRQ. A
- * platform can implement platform_irq_to_vector(irq) and
- * platform_local_vector_to_irq(vector) APIs to differentiate the mapping.
- * Please see also arch/ia64/include/asm/hw_irq.h for those APIs.
+ * systems, we use one-to-one mapping between IA-64 vector and IRQ.
*
* To sum up, there are three levels of mappings involved:
*
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 6d17d26caf98..0a8e5e585edc 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -35,18 +35,6 @@ void ack_bad_irq(unsigned int irq)
printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
}
-#ifdef CONFIG_IA64_GENERIC
-ia64_vector __ia64_irq_to_vector(int irq)
-{
- return irq_cfg[irq].vector;
-}
-
-unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
-{
- return __this_cpu_read(vector_irq[vec]);
-}
-#endif
-
/*
* Interrupt statistics:
*/
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index ab87d6c25b15..1c81ec752b04 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -314,7 +314,7 @@ void irq_complete_move(unsigned irq)
cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
for_each_cpu(i, &cleanup_mask)
- platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
+ ia64_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
cfg->move_in_progress = 0;
}
diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c
index 3b1dd5496d08..efc9b568401c 100644
--- a/arch/ia64/kernel/machine_kexec.c
+++ b/arch/ia64/kernel/machine_kexec.c
@@ -127,7 +127,6 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
ia64_srlz_d();
while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
ia64_eoi();
- platform_kernel_launch_event();
rnk = (relocate_new_kernel_t)&code_addr;
(*rnk)(image->head, image->start, ia64_boot_param,
GRANULEROUNDDOWN((unsigned long) pal_addr));
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index ebd82535f51b..3db3be7aaae5 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -11,7 +11,6 @@
#include <asm/page.h>
struct ia64_machine_vector ia64_mv = {
- .mmiowb = ___ia64_mmiowb
};
EXPORT_SYMBOL(ia64_mv);
@@ -69,9 +68,3 @@ machvec_setup (char **arg)
{
}
EXPORT_SYMBOL(machvec_setup);
-
-void
-machvec_timer_interrupt (int irq, void *dev_id)
-{
-}
-EXPORT_SYMBOL(machvec_timer_interrupt);
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 79190d877fa7..f72b05fe918b 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -744,7 +744,7 @@ ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
static void
ia64_mca_wakeup(int cpu)
{
- platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
+ ia64_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
}
/*
@@ -1490,7 +1490,7 @@ ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
cpuid = cpumask_next(cpuid+1, cpu_online_mask);
if (cpuid < nr_cpu_ids) {
- platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
+ ia64_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
} else {
/* If no log record, switch out of polling mode */
if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {
@@ -1523,7 +1523,7 @@ static void
ia64_mca_cmc_poll (struct timer_list *unused)
{
/* Trigger a CMC interrupt cascade */
- platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
+ ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
IA64_IPI_DM_INT, 0);
}
@@ -1560,7 +1560,7 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
cpuid = cpumask_next(cpuid+1, cpu_online_mask);
if (cpuid < NR_CPUS) {
- platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
+ ia64_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
} else {
/*
* If a log was recorded, increase our polling frequency,
@@ -1600,7 +1600,7 @@ static void
ia64_mca_cpe_poll (struct timer_list *unused)
{
/* Trigger a CPE interrupt cascade */
- platform_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
+ ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
IA64_IPI_DM_INT, 0);
}
diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c
index 519d9432f407..df5c28f252e3 100644
--- a/arch/ia64/kernel/msi_ia64.c
+++ b/arch/ia64/kernel/msi_ia64.c
@@ -43,7 +43,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata,
}
#endif /* CONFIG_SMP */
-int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
+int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
struct msi_msg msg;
unsigned long dest_phys_id;
@@ -77,7 +77,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
return 0;
}
-void ia64_teardown_msi_irq(unsigned int irq)
+void arch_teardown_msi_irq(unsigned int irq)
{
destroy_irq(irq);
}
@@ -111,23 +111,6 @@ static struct irq_chip ia64_msi_chip = {
.irq_retrigger = ia64_msi_retrigger_irq,
};
-
-int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
-{
- if (platform_setup_msi_irq)
- return platform_setup_msi_irq(pdev, desc);
-
- return ia64_setup_msi_irq(pdev, desc);
-}
-
-void arch_teardown_msi_irq(unsigned int irq)
-{
- if (platform_teardown_msi_irq)
- return platform_teardown_msi_irq(irq);
-
- return ia64_teardown_msi_irq(irq);
-}
-
#ifdef CONFIG_INTEL_IOMMU
#ifdef CONFIG_SMP
static int dmar_msi_set_affinity(struct irq_data *data,
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 17085a8078fe..c455ece977ad 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -249,7 +249,7 @@ check_sal_cache_flush (void)
* Send ourselves a timer interrupt, wait until it's reported, and see
* if SAL_CACHE_FLUSH drops it.
*/
- platform_send_ipi(cpu, IA64_TIMER_VECTOR, IA64_IPI_DM_INT, 0);
+ ia64_send_ipi(cpu, IA64_TIMER_VECTOR, IA64_IPI_DM_INT, 0);
while (!ia64_get_irr(IA64_TIMER_VECTOR))
cpu_relax();
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 78d0d22dd17e..4dc74500eac5 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1039,7 +1039,6 @@ cpu_init (void)
ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
max_num_phys_stacked = num_phys_stacked;
}
- platform_cpu_init();
}
void __init
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 133b63355814..4825b0b41d49 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -146,7 +146,7 @@ static inline void
send_IPI_single (int dest_cpu, int op)
{
set_bit(op, &per_cpu(ipi_operation, dest_cpu));
- platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
+ ia64_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}
/*
@@ -213,7 +213,7 @@ kdump_smp_send_init(void)
for_each_online_cpu(cpu) {
if (cpu != self_cpu) {
if(kdump_status[cpu] == 0)
- platform_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
+ ia64_send_ipi(cpu, 0, IA64_IPI_DM_INIT, 0);
}
}
}
@@ -224,7 +224,7 @@ kdump_smp_send_init(void)
void
smp_send_reschedule (int cpu)
{
- platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
+ ia64_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}
EXPORT_SYMBOL_GPL(smp_send_reschedule);
@@ -234,7 +234,7 @@ EXPORT_SYMBOL_GPL(smp_send_reschedule);
static void
smp_send_local_flush_tlb (int cpu)
{
- platform_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
+ ia64_send_ipi(cpu, IA64_IPI_LOCAL_TLB_FLUSH, IA64_IPI_DM_INT, 0);
}
void
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index df56f739dd11..f7058659526c 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -467,7 +467,7 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);
set_brendez_area(cpu);
- platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
+ ia64_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);
/*
* Wait 10s total for the AP to start
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 4ecd81b0e8ec..d9ad93a6d825 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -167,8 +167,6 @@ timer_interrupt (int irq, void *dev_id)
return IRQ_HANDLED;
}
- platform_timer_interrupt(irq, dev_id);
-
new_itm = local_cpu_data->itm_next;
if (!time_after(ia64_get_itc(), new_itm))