path: root/include/asm-x86/pda.h
#ifndef X86_64_PDA_H
#define X86_64_PDA_H

#ifndef __ASSEMBLY__
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <asm/page.h>

/* Per processor data structure.  %gs points to it while the kernel runs */
struct x8664_pda {
	struct task_struct *pcurrent;	/* 0  Current process */
	unsigned long data_offset;	/* 8 Per cpu data offset from linker
					   address */
	unsigned long kernelstack;  /* 16 top of kernel stack for current */
	unsigned long oldrsp; 	    /* 24 user rsp for system call */
	int irqcount;		    /* 32 Irq nesting counter. Starts with -1 */
	int cpunumber;		    /* 36 Logical CPU number */
#ifdef CONFIG_CC_STACKPROTECTOR
	unsigned long stack_canary;	/* 40 stack canary value */
					/* gcc-ABI: this canary MUST be at
					   offset 40!!! */
#endif
	char *irqstackptr;	    /* top of the interrupt stack for this CPU */
	int nodenumber;		    /* NUMA node number of this CPU */
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* number of NMIs on this CPU */
	short mmu_state;
	short isidle;
	struct mm_struct *active_mm;
	unsigned apic_timer_irqs;
	unsigned irq0_irqs;
	unsigned irq_resched_count;
	unsigned irq_call_count;
	unsigned irq_tlb_count;
	unsigned irq_thermal_count;
	unsigned irq_threshold_count;
	unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;
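
/*
 * Illustrative sketch (not part of the original header): with %gs holding
 * the PDA base, a field such as pcurrent (offset 0) or kernelstack
 * (offset 16) is reached with a single %gs-relative move, e.g.
 *
 *	movq %gs:16, %rax	# rax = this CPU's kernel stack top
 *
 * which is what the accessor macros further down generate.
 */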

extern struct x8664_pda *_cpu_pda[];
extern struct x8664_pda boot_cpu_pda[];

#define cpu_pda(i) (_cpu_pda[i])
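
/*
 * Illustrative: cpu_pda(i)->pcurrent reads CPU i's PDA through the
 * _cpu_pda[] pointer array (an ordinary memory access, not a %gs one),
 * which is how one CPU can look at another CPU's PDA.
 */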

/*
 * There is no fast way to get the base address of the PDA; all accesses
 * have to go through %gs.  So it needs to be done this Torvaldian way.
 */
extern void __bad_pda_field(void) __attribute__((noreturn));

/*
 * _proxy_pda doesn't actually exist, but it is named as a memory operand
 * in every PDA access so that gcc gets the read/write dependencies right.
 */
extern struct x8664_pda _proxy_pda;
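
/*
 * Rough sketch of why the dummy operand matters (assumption, not from the
 * original comment): without naming _proxy_pda.field as an "m"/"+m"
 * operand, gcc could reorder or cache PDA accesses around the asm, e.g.
 *
 *	write_pda(isidle, 1);
 *	x = read_pda(isidle);	// must observe the write above
 *
 * The shared operand gives the two asm statements an ordering dependency.
 */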

#define pda_offset(field) offsetof(struct x8664_pda, field)
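
/*
 * Example (illustrative): pda_offset(kernelstack) is
 * offsetof(struct x8664_pda, kernelstack), i.e. 16 per the layout comments
 * above; it is passed to the asm below as an "i" (immediate) operand.
 */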

#define pda_to_op(op,field,val) do {		\
	typedef typeof(_proxy_pda.field) T__;	\
	if (0) { T__ tmp__; tmp__ = (val); }	/* type checking */ \
	switch (sizeof(_proxy_pda.field)) {	\
	case 2:					\
		asm(op "w %1,%%gs:%c2" :	\
		    "+m" (_proxy_pda.field) :	\
		    "ri" ((T__)val),		\
		    "i" (pda_offset(field)));	\
		break;				\
	case 4:					\
		asm(op "l %1,%%gs:%c2" :	\
		    "+m" (_proxy_pda.field) :	\
		    "ri" ((T__)val),		\
		    "i" (pda_offset(field)));	\
		break;				\
	case 8:					\
		asm(op "q %1,%%gs:%c2" :	\
		    "+m" (_proxy_pda.field) :	\
		    "ri" ((T__)val),		\
		    "i" (pda_offset(field)));	\
		break;				\
	default:				\
		__bad_pda_field();		\
	}					\
} while (0)
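
/*
 * Illustrative expansion (sketch, not actual generated output): for an
 * 8-byte field, write_pda(kernelstack, v) selects the "case 8" arm above
 * and emits roughly
 *
 *	movq v, %gs:16
 *
 * with the field offset substituted through the %c2 immediate operand.
 */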

#define pda_from_op(op,field) ({		\
	typeof(_proxy_pda.field) ret__;		\
	switch (sizeof(_proxy_pda.field)) {	\
	case 2:					\
		asm(op "w %%gs:%c1,%0" :	\
		    "=r" (ret__) :		\
		    "i" (pda_offset(field)),	\
		    "m" (_proxy_pda.field));	\
		break;				\
	case 4:					\
		asm(op "l %%gs:%c1,%0" :	\
		    "=r" (ret__) :		\
		    "i" (pda_offset(field)),	\
		    "m" (_proxy_pda.field));	\
		break;				\
	case 8:					\
		asm(op "q %%gs:%c1,%0" :	\
		    "=r" (ret__) :		\
		    "i" (pda_offset(field)),	\
		    "m" (_proxy_pda.field));	\
		break;				\
	default:				\
		__bad_pda_field();		\
	}					\
	ret__; })
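
/*
 * Illustrative expansion (sketch): read_pda(cpunumber) hits the "case 4"
 * arm (cpunumber is an int at offset 36) and emits roughly
 *
 *	movl %gs:36, %eax
 *
 * returning the logical CPU number of the running processor.
 */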

#define read_pda(field) pda_from_op("mov",field)
#define write_pda(field,val) pda_to_op("mov",field,val)
#define add_pda(field,val) pda_to_op("add",field,val)
#define sub_pda(field,val) pda_to_op("sub",field,val)
#define or_pda(field,val) pda_to_op("or",field,val)
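
/*
 * Typical usage (illustrative; the real callers live elsewhere in the
 * tree):
 *
 *	struct task_struct *t = read_pda(pcurrent);
 *	add_pda(irq_call_count, 1);
 *	write_pda(isidle, 1);
 */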

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define test_and_clear_bit_pda(bit,field) ({		\
	int old__;						\
	asm volatile("btr %2,%%gs:%c3\n\tsbbl %0,%0"		\
	    : "=r" (old__), "+m" (_proxy_pda.field) 		\
	    : "dIr" (bit), "i" (pda_offset(field)) : "memory");	\
	old__;							\
})
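
/*
 * Illustrative use (sketch, not from this header): with preemption off,
 *
 *	if (test_and_clear_bit_pda(0, isidle))
 *		do_something();
 *
 * tests and clears bit 0 of this CPU's isidle field with a single btr;
 * old__ carries the previous bit value out via the sbbl.
 */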

#endif	/* !__ASSEMBLY__ */

#define PDA_STACKOFFSET (5*8)

#endif	/* X86_64_PDA_H */