/*
 * Common functions shared between the various APIC flavours
 *
 * SPDX-License-Identifier: GPL-2.0
 */
#include <linux/irq.h>
#include <asm/apic.h>

/*
 * Resolve the APIC destination for an interrupt in physical mode.
 *
 * Picks the first CPU present in @msk, stores that CPU's APIC ID in
 * @apicid and records the single chosen CPU as the effective affinity
 * of @irqd.
 *
 * Returns 0 on success, -EINVAL when @msk contains no valid CPU.
 */
int default_cpu_mask_to_apicid(const struct cpumask *msk, struct irq_data *irqd,
			       unsigned int *apicid)
{
	unsigned int target = cpumask_first(msk);

	if (target >= nr_cpu_ids)
		return -EINVAL;

	*apicid = per_cpu(x86_cpu_to_apicid, target);
	irq_data_update_effective_affinity(irqd, cpumask_of(target));

	return 0;
}

/*
 * Resolve the APIC destination for an interrupt in flat logical mode.
 *
 * Uses the low word of @mask, restricted to APIC_ALL_CPUS, directly as
 * the logical destination bitmap, and mirrors the same bits into the
 * effective affinity mask of @irqd.
 *
 * Returns 0 on success, -EINVAL when no targetable CPU remains after
 * masking.
 */
int flat_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqd,
			    unsigned int *apicid)
{
	unsigned long dest;
	struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqd);

	dest = cpumask_bits(mask)[0] & APIC_ALL_CPUS;
	if (!dest)
		return -EINVAL;

	*apicid = (unsigned int)dest;
	cpumask_bits(effmsk)[0] = dest;

	return 0;
}

/*
 * Report whether @apicid is already marked as used in the physical ID
 * bitmap @map.
 */
bool default_check_apicid_used(physid_mask_t *map, int apicid)
{
	if (physid_isset(apicid, *map))
		return true;

	return false;
}

/*
 * Flat-mode vector allocation domain: always returns the full set of
 * CPUs addressable via APIC_ALL_CPUS in @retmask, regardless of the
 * requested @cpu and @mask.
 */
void flat_vector_allocation_domain(int cpu, struct cpumask *retmask,
				   const struct cpumask *mask)
{
	/*
	 * Careful. Some cpus do not strictly honor the set of cpus
	 * specified in the interrupt destination when using lowest
	 * priority interrupt delivery mode.
	 *
	 * In particular there was a hyperthreading cpu observed to
	 * deliver interrupts to the wrong hyperthread when only one
	 * hyperthread was specified in the interrupt destination.
	 */
	cpumask_clear(retmask);
	/* Write the raw destination bits; upper words were cleared above. */
	cpumask_bits(retmask)[0] = APIC_ALL_CPUS;
}

/*
 * Default vector allocation domain: the vector is bound to a single
 * CPU, so @retmask contains exactly @cpu. @mask is ignored.
 */
void default_vector_allocation_domain(int cpu, struct cpumask *retmask,
				      const struct cpumask *mask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}

/*
 * Identity mapping of IO-APIC physical IDs: copy @phys_map into
 * @retmap unchanged.
 */
void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
	*retmap = *phys_map;
}

/*
 * Translate an MPS CPU number into the APIC ID reported by the BIOS
 * for that CPU.
 *
 * Returns BAD_APICID when @mps_cpu is out of range or the CPU is not
 * present.
 */
int default_cpu_present_to_apicid(int mps_cpu)
{
	/*
	 * Negative @mps_cpu converts to a huge unsigned value in the
	 * comparison against nr_cpu_ids and is rejected here as well.
	 */
	if (mps_cpu >= nr_cpu_ids || !cpu_present(mps_cpu))
		return BAD_APICID;

	return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
}
EXPORT_SYMBOL_GPL(default_cpu_present_to_apicid);

/*
 * Report whether @phys_apicid is set in the map of physically present
 * APIC IDs (phys_cpu_present_map).
 */
int default_check_phys_apicid_present(int phys_apicid)
{
	return physid_isset(phys_apicid, phys_cpu_present_map);
}

/*
 * Default set of CPUs an interrupt may target: all online CPUs on SMP
 * kernels, only CPU 0 on uniprocessor builds.
 */
const struct cpumask *default_target_cpus(void)
{
#ifdef CONFIG_SMP
	return cpu_online_mask;
#else
	return cpumask_of(0);
#endif
}

/*
 * Target-CPUs callback that always returns the online CPU mask,
 * independent of CONFIG_SMP.
 */
const struct cpumask *online_target_cpus(void)
{
	return cpu_online_mask;
}

/*
 * An APIC ID is valid for the default addressing scheme when it is
 * below 255. Returns 1 when valid, 0 otherwise.
 */
int default_apic_id_valid(int apicid)
{
	if (apicid >= 255)
		return 0;

	return 1;
}